From f3b7d7df6ae504b96a6538d7f27b50fbbb18eecb Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Thu, 8 Aug 2024 15:43:16 +1000
Subject: [PATCH 01/58] Add more tests. Find a bug in creating a new
 experiment.

---
 tests/view_models/test_experiment.py | 75 ++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)
 create mode 100644 tests/view_models/test_experiment.py

diff --git a/tests/view_models/test_experiment.py b/tests/view_models/test_experiment.py
new file mode 100644
index 00000000..1538f94c
--- /dev/null
+++ b/tests/view_models/test_experiment.py
@@ -0,0 +1,75 @@
import pytest
from fastapi.encoders import jsonable_encoder

from mavedb.view_models.experiment import ExperimentCreate


def test_cannot_create_experiment_without_a_title():
    invalid_experiment = {
        "shortDescription": "Test experiment",
        "abstractText": "Abstract",
        "methodText": "Methods",
    }

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**jsonable_encoder(invalid_experiment))

    assert "field required" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_experiment_with_a_space_title():
    invalid_experiment = {
        "title": " ",
        "shortDescription": "Test experiment",
        "abstractText": "Abstract",
        "methodText": "Methods",
    }

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**jsonable_encoder(invalid_experiment))

    assert "field required" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_experiment_without_a_short_description():
    invalid_experiment = {
        "title": "title",
        "abstractText": "Abstract",
        "methodText": "Methods",
    }

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**jsonable_encoder(invalid_experiment))

    assert "field required" in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_experiment_without_an_abstract():
    invalid_experiment = {
        "title": "title",
        "shortDescription": "Test experiment",
        "methodText": "Methods",
    }

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**jsonable_encoder(invalid_experiment))

    assert "field required" in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_experiment_without_a_method():
    invalid_experiment = {
        "title": "title",
        "shortDescription": "Test experiment",
        "abstractText": "Abstract",
    }

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**jsonable_encoder(invalid_experiment))

    assert "field required" in str(exc_info.value)
    assert "methodText" in str(exc_info.value)
\ No newline at end of file

From 63be7ebef2368d225ea49c7a2811d852d913d6c7 Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Fri, 9 Aug 2024 10:44:17 +1000
Subject: [PATCH 02/58] Add more tests for experiment view model, but need
 validators in experiment.

---
 tests/view_models/test_experiment.py | 107 ++++++++++++++++++++++++++-
 1 file changed, 106 insertions(+), 1 deletion(-)

diff --git a/tests/view_models/test_experiment.py b/tests/view_models/test_experiment.py
index 1538f94c..adb3a7d3 100644
--- a/tests/view_models/test_experiment.py
+++ b/tests/view_models/test_experiment.py
@@ -29,7 +29,22 @@ def test_cannot_create_experiment_with_a_space_title():
     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

-    assert "field required" in str(exc_info.value)
+    assert "Invalid title. Title should not be None or space." in str(exc_info.value)
     assert "title" in str(exc_info.value)
+
+
+def test_cannot_create_experiment_with_an_empty_title():
+    invalid_experiment = {
+        "title": "",
+        "shortDescription": "Test experiment",
+        "abstractText": "Abstract",
+        "methodText": "Methods",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "none is not an allowed value" in str(exc_info.value)
+    assert "title" in str(exc_info.value)


 def test_cannot_create_experiment_without_a_short_description():
     invalid_experiment = {
         "title": "title",
         "abstractText": "Abstract",
         "methodText": "Methods",
     }

     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

     assert "field required" in str(exc_info.value)
     assert "shortDescription" in str(exc_info.value)


+def test_cannot_create_experiment_with_a_space_short_description():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": " ",
+        "abstractText": "Abstract",
+        "methodText": "Methods",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "Invalid short description. Short description should not be None or space" in str(exc_info.value)
+    assert "shortDescription" in str(exc_info.value)
+
+
+def test_cannot_create_experiment_with_an_empty_short_description():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": "",
+        "abstractText": "Abstract",
+        "methodText": "Methods",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "none is not an allowed value" in str(exc_info.value)
+    assert "shortDescription" in str(exc_info.value)


 def test_cannot_create_experiment_without_an_abstract():
     invalid_experiment = {
         "title": "title",
         "shortDescription": "Test experiment",
         "methodText": "Methods",
     }

     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

     assert "field required" in str(exc_info.value)
     assert "abstractText" in str(exc_info.value)


+def test_cannot_create_experiment_with_a_space_abstract():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": "Test experiment",
+        "abstractText": " ",
+        "methodText": "Methods",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "Invalid abstract text. Abstract text should not be None or space." in str(exc_info.value)
+    assert "abstractText" in str(exc_info.value)
+
+
+def test_cannot_create_experiment_with_an_empty_abstract():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": "Test experiment",
+        "abstractText": "",
+        "methodText": "Methods",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "none is not an allowed value" in str(exc_info.value)
+    assert "abstractText" in str(exc_info.value)


 def test_cannot_create_experiment_without_a_method():
     invalid_experiment = {
         "title": "title",
         "shortDescription": "Test experiment",
         "abstractText": "Abstract",
     }

     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

     assert "field required" in str(exc_info.value)
+    assert "methodText" in str(exc_info.value)
+
+
+def test_cannot_create_experiment_with_a_space_method():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": "Test experiment",
+        "abstractText": "Abstract",
+        "methodText": " ",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "Invalid method text. Method text should not be None or space." in str(exc_info.value)
+    assert "methodText" in str(exc_info.value)
+
+
+def test_cannot_create_experiment_with_an_empty_method():
+    invalid_experiment = {
+        "title": "title",
+        "shortDescription": "Test experiment",
+        "abstractText": "Abstract",
+        "methodText": "",
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ExperimentCreate(**jsonable_encoder(invalid_experiment))
+
+    assert "none is not an allowed value" in str(exc_info.value)
     assert "methodText" in str(exc_info.value)
\ No newline at end of file

From 253df237953e0cd2434ff0e40fa6060a5a808199 Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Mon, 12 Aug 2024 10:46:59 +1000
Subject: [PATCH 03/58] Add tests and modified some.

---
 tests/view_models/test_experiment.py      | 20 ++++++++++++----
 tests/view_models/test_target_sequence.py | 28 +++++++----------------
 2 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/tests/view_models/test_experiment.py b/tests/view_models/test_experiment.py
index adb3a7d3..27e590eb 100644
--- a/tests/view_models/test_experiment.py
+++ b/tests/view_models/test_experiment.py
@@ -2,6 +2,16 @@ import pytest
 from fastapi.encoders import jsonable_encoder

 from mavedb.view_models.experiment import ExperimentCreate
+from tests.helpers.constants import TEST_MINIMAL_EXPERIMENT
+
+
+# Test valid experiment
+def test_create_experiment():
+    experiment = ExperimentCreate(**jsonable_encoder(TEST_MINIMAL_EXPERIMENT))
+    assert experiment.title == "Test Experiment Title"
+    assert experiment.short_description == "Test experiment"
+    assert experiment.abstract_text == "Abstract"
+    assert experiment.method_text == "Methods"


 def test_cannot_create_experiment_without_a_title():
@@ -29,7 +39,7 @@ def test_cannot_create_experiment_with_a_space_title():
     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

-    assert "Invalid title. Title should not be None or space." in str(exc_info.value)
+    assert "This field is required and cannot be empty." in str(exc_info.value)
     assert "title" in str(exc_info.value)

 def test_cannot_create_experiment_with_a_space_short_description():
     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

-    assert "Invalid short description. Short description should not be None or space" in str(exc_info.value)
+    assert "This field is required and cannot be empty." in str(exc_info.value)
     assert "shortDescription" in str(exc_info.value)

 def test_cannot_create_experiment_with_a_space_abstract():
     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

-    assert "Invalid abstract text. Abstract text should not be None or space." in str(exc_info.value)
+    assert "This field is required and cannot be empty." in str(exc_info.value)
     assert "abstractText" in str(exc_info.value)

 def test_cannot_create_experiment_with_a_space_method():
     with pytest.raises(ValueError) as exc_info:
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

-    assert "Invalid method text. Method text should not be None or space." in str(exc_info.value)
+    assert "This field is required and cannot be empty." in str(exc_info.value)
     assert "methodText" in str(exc_info.value)

 def test_cannot_create_experiment_with_an_empty_method():
         ExperimentCreate(**jsonable_encoder(invalid_experiment))

     assert "none is not an allowed value" in str(exc_info.value)
-    assert "methodText" in str(exc_info.value)
\ No newline at end of file
+    assert "methodText" in str(exc_info.value)
diff --git a/tests/view_models/test_target_sequence.py b/tests/view_models/test_target_sequence.py
index 5f9e8c09..b5f15e04 100644
--- a/tests/view_models/test_target_sequence.py
+++ b/tests/view_models/test_target_sequence.py
@@ -1,7 +1,7 @@
 from mavedb.view_models.target_sequence import TargetSequenceCreate, sanitize_target_sequence_label
+from tests.helpers.constants import TEST_TAXONOMY

 import pytest
-import datetime


 SEQUENCE = (
@@ -15,24 +15,12 @@ SEQUENCE = (
     "ACTATGGATGAACGAAATAGACAGATCGCTGAGATAGGTGCCTCACTGATTAAGCATTGGTAA"
 )

-TAXONOMY = {
-    "taxId": 9606,
-    "organismName": "Homo sapiens",
-    "commonName": "human",
-    "rank": "SPECIES",
-    "hasDescribedSpeciesName": True,
-    "articleReference": "NCBI:txid9606",
-    "genomeId": None,
-    "id": 14,
-    "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606",
-}
-

 def test_create_valid_target_sequence():
     sequence_type = "dna"
     label = "sequence_label"
     sequence = SEQUENCE
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     target_sequence = TargetSequenceCreate(
         sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
     )

 def test_target_sequence_label_is_sanitized():
     sequence_type = "dna"
     label = "  sanitize  this  label  "
     sequence = SEQUENCE
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     target_sequence = TargetSequenceCreate(
         sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
     )

 def test_target_sequence_label_can_be_nonetype():
     sequence_type = "dna"
     label = None
     sequence = SEQUENCE
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     target_sequence = TargetSequenceCreate(
         sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
     )

 def test_cannot_create_target_sequence_with_label_containing_colon():
     sequence_type = "dna"
     label = "sequence:label"
     sequence = SEQUENCE
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     with pytest.raises(ValueError) as exc_info:
         target_sequence = TargetSequenceCreate(
             sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
         )

 def test_cannot_create_target_sequence_with_invalid_sequence_type():
     sequence_type = "invalid"
     label = "sequence_label"
     sequence = SEQUENCE
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     with pytest.raises(ValueError) as exc_info:
         target_sequence = TargetSequenceCreate(
             sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
         )

 def test_cannot_create_target_sequence_with_invalid_inferred_type():
     sequence_type = "infer"
     label = "sequence_label"
     sequence = SEQUENCE + "!"
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     with pytest.raises(ValueError) as exc_info:
         target_sequence = TargetSequenceCreate(
             sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
         )

 def test_cannot_create_target_sequence_with_invalid_sequence(sequence_type, exc_string):
     label = "sequence_label"
     sequence = SEQUENCE + "!"
-    taxonomy = TAXONOMY
+    taxonomy = TEST_TAXONOMY

     with pytest.raises(ValueError) as exc_info:
         target_sequence = TargetSequenceCreate(
             sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label
         )
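The experiment view-model validator that produces the "This field is required and cannot be empty." message asserted above is not itself shown in this series. A minimal sketch consistent with these assertions — assuming the pydantic v1 validator style MaveDB's view models used at the time; the base-class name and snake_case field names here are illustrative, not taken from the patches:

from pydantic import validator


class ExperimentCreate(ExperimentBase):  # hypothetical placement inside the real view model
    @validator("title", "short_description", "abstract_text", "method_text", pre=True)
    def field_must_not_be_empty(cls, v):
        # An empty string is coerced to None so pydantic's own
        # "none is not an allowed value" error fires, while a
        # whitespace-only string raises the custom message the tests expect.
        if isinstance(v, str) and not v.strip():
            if not v:
                return None
            raise ValueError("This field is required and cannot be empty.")
        return v

With a pre-validator like this, both the "" and " " cases in the tests above fail with exactly the two messages being asserted.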
From b7d7b99c58cdedf7ec122750b1286444950e09b0 Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Mon, 12 Aug 2024 16:54:54 +1000
Subject: [PATCH 04/58] Add tests.

---
 tests/routers/test_experiments.py   | 30 +++++++++++++++++++++++++++
 tests/view_models/test_score_set.py |  2 --
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py
index 43049c37..a72ab364 100644
--- a/tests/routers/test_experiments.py
+++ b/tests/routers/test_experiments.py
@@ -119,6 +119,36 @@ def test_can_update_own_private_experiment_set(session, client, setup_router_db):
     assert response.json()["title"] == "Second Experiment"


+def test_can_update_own_private_experiment_short_description(session, client, setup_router_db):
+    experiment = create_experiment(client)
+    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
+    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "shortDescription": "New description"})
+    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
+    assert response.status_code == 200
+    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
+    assert response.json()["shortDescription"] == "New description"
+
+
+def test_can_update_own_private_experiment_abstract(session, client, setup_router_db):
+    experiment = create_experiment(client)
+    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
+    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "abstractText": "New abstract"})
+    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
+    assert response.status_code == 200
+    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
+    assert response.json()["abstractText"] == "New abstract"
+
+
+def test_can_update_own_private_experiment_method(session, client, setup_router_db):
+    experiment = create_experiment(client)
+    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
+    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "methodText": "New method"})
+    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
+    assert response.status_code == 200
+    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
+    assert response.json()["methodText"] == "New method"
+
+
 def test_cannot_update_other_users_private_experiment_set(session, client, setup_router_db):
     experiment = create_experiment(client)
     change_ownership(session, experiment["urn"], ExperimentDbModel)
diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py
index 5dc6fb3c..257aed79 100644
--- a/tests/view_models/test_score_set.py
+++ b/tests/view_models/test_score_set.py
@@ -8,8 +8,6 @@
 from tests.helpers.constants import TEST_MINIMAL_SEQ_SCORESET

-import datetime
-

 def test_cannot_create_score_set_without_a_target():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()

From 0c9c530a49282d701c10c700ec512ffe1e98b678 Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Mon, 26 Aug 2024 17:30:41 +1000
Subject: [PATCH 05/58] Modify and add more tests.

---
 tests/routers/test_score_set.py      |  35 +++++++
 tests/view_models/test_experiment.py | 125 +++++++++--------------
 tests/view_models/test_score_set.py  | 141 ++++++++++++++++++++++++++-
 3 files changed, 219 insertions(+), 82 deletions(-)

diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py
index 9cb17f77..5a230471 100644
--- a/tests/routers/test_score_set.py
+++ b/tests/routers/test_score_set.py
@@ -681,6 +681,41 @@ def test_search_score_sets_match(session, data_provider, client, setup_router_db):
     assert response.json()[0]["title"] == score_set_1_1["title"]


+def test_search_score_sets_urn_match(session, data_provider, client, setup_router_db, data_files):
+    experiment_1 = create_experiment(client)
+    score_set_1_1 = create_seq_score_set_with_variants(
+        client,
+        session,
+        data_provider,
+        experiment_1["urn"],
+        data_files / "scores.csv"
+    )
+
+    search_payload = {"urn": score_set_1_1['urn']}
+    response = client.post("/api/v1/score-sets/search", json=search_payload)
+    assert response.status_code == 200
+    assert len(response.json()) == 1
+    assert response.json()[0]["urn"] == score_set_1_1["urn"]
+
+
+# There is space in the end of test urn. The search result returned nothing before.
+def test_search_score_sets_urn_with_space_match(session, data_provider, client, setup_router_db, data_files):
+    experiment_1 = create_experiment(client)
+    score_set_1_1 = create_seq_score_set_with_variants(
+        client,
+        session,
+        data_provider,
+        experiment_1["urn"],
+        data_files / "scores.csv"
+    )
+    urn_with_space = score_set_1_1['urn'] + " "
+    search_payload = {"urn": urn_with_space}
+    response = client.post("/api/v1/score-sets/search", json=search_payload)
+    assert response.status_code == 200
+    assert len(response.json()) == 1
+    assert response.json()[0]["urn"] == score_set_1_1["urn"]
+
+
 def test_anonymous_cannot_delete_other_users_private_scoreset(
     session, data_provider, client, setup_router_db, data_files, anonymous_app_overrides
 ):
diff --git a/tests/view_models/test_experiment.py b/tests/view_models/test_experiment.py
index 27e590eb..77e9e472 100644
--- a/tests/view_models/test_experiment.py
+++ b/tests/view_models/test_experiment.py

def test_cannot_create_experiment_without_a_title():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"title"})
    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "field required" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_experiment_with_a_space_title():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"title"})
    invalid_experiment["title"] = " "

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_experiment_with_an_empty_title():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"title"})
    invalid_experiment["title"] = ""

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_experiment_without_a_short_description():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"})

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "field required" in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_experiment_with_a_space_short_description():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"})
    invalid_experiment["shortDescription"] = " "

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_experiment_with_an_empty_short_description():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"shortDescription"})
    invalid_experiment["shortDescription"] = ""

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_experiment_without_an_abstract():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"})

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "field required" in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_experiment_with_a_space_abstract():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"})
    invalid_experiment["abstractText"] = " "

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_experiment_with_an_empty_abstract():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"abstractText"})
    invalid_experiment["abstractText"] = ""

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_experiment_without_a_method():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"})

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "field required" in str(exc_info.value)
    assert "methodText" in str(exc_info.value)


def test_cannot_create_experiment_with_a_space_method():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"})
    invalid_experiment["methodText"] = " "

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "methodText" in str(exc_info.value)


def test_cannot_create_experiment_with_an_empty_method():
    experiment = TEST_MINIMAL_EXPERIMENT.copy()
    invalid_experiment = jsonable_encoder(experiment, exclude={"methodText"})
    invalid_experiment["methodText"] = ""

    with pytest.raises(ValueError) as exc_info:
        ExperimentCreate(**invalid_experiment)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "methodText" in str(exc_info.value)
diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py
index 257aed79..39c9d7d6 100644
--- a/tests/view_models/test_score_set.py
+++ b/tests/view_models/test_score_set.py

from fastapi.encoders import jsonable_encoder

from mavedb.view_models.score_set import ScoreSetCreate, ScoreSetModify
from mavedb.view_models.target_gene import TargetGeneCreate
from mavedb.view_models.publication_identifier import PublicationIdentifierCreate

from tests.helpers.constants import TEST_MINIMAL_SEQ_SCORESET

def test_cannot_create_score_set_with_non_unique_target_labels():
    )

    assert "Target sequence labels cannot be duplicated." in str(exc_info.value)


def test_cannot_create_score_set_without_a_title():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"title"})
    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "field required" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_score_set_with_a_space_title():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"title"})
    invalid_score_set["title"] = " "

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_score_set_with_an_empty_title():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"title"})
    invalid_score_set["title"] = ""

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "title" in str(exc_info.value)


def test_cannot_create_score_set_without_a_short_description():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"})

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "field required" in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_score_set_with_a_space_short_description():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"})
    invalid_score_set["shortDescription"] = " "

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_score_set_with_an_empty_short_description():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"shortDescription"})
    invalid_score_set["shortDescription"] = ""

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "shortDescription" in str(exc_info.value)


def test_cannot_create_score_set_without_an_abstract():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"})

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "field required" in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_score_set_with_a_space_abstract():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"})
    invalid_score_set["abstractText"] = " "

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_score_set_with_an_empty_abstract():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"abstractText"})
    invalid_score_set["abstractText"] = ""

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "abstractText" in str(exc_info.value)


def test_cannot_create_score_set_without_a_method():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"})

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "field required" in str(exc_info.value)
    assert "methodText" in str(exc_info.value)


def test_cannot_create_score_set_with_a_space_method():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"})
    invalid_score_set["methodText"] = " "

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "This field is required and cannot be empty." in str(exc_info.value)
    assert "methodText" in str(exc_info.value)


def test_cannot_create_score_set_with_an_empty_method():
    score_set = TEST_MINIMAL_SEQ_SCORESET.copy()
    invalid_score_set = jsonable_encoder(score_set, exclude={"methodText"})
    invalid_score_set["methodText"] = ""

    with pytest.raises(ValueError) as exc_info:
        ScoreSetCreate(**invalid_score_set)

    assert "none is not an allowed value" in str(exc_info.value)
    assert "methodText" in str(exc_info.value)

From 11683ff08a0f77f5fc5ecd0ad49fb1a9a57d06f5 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 3 Sep 2024 09:57:15 -0700
Subject: [PATCH 06/58] Make Index on Contributors Orcid ID Unique

---
 ...42909_make_index_on_contributors_unique.py | 30 +++++++++++++++++++
 src/mavedb/models/contributor.py              |  2 +-
 2 files changed, 31 insertions(+), 1 deletion(-)
 create mode 100644 alembic/versions/1cee01c42909_make_index_on_contributors_unique.py

diff --git a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py
new file mode 100644
index 00000000..1f5462c1
--- /dev/null
+++ b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py
@@ -0,0 +1,30 @@
"""make index on contributors unique

Revision ID: 1cee01c42909
Revises: 76e1e55bc5c1
Create Date: 2024-09-03 09:53:21.635751

"""
from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "1cee01c42909"
down_revision = "76e1e55bc5c1"
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index("ix_contributors_orcid_id", table_name="contributors")
    op.create_index(op.f("ix_contributors_orcid_id"), "contributors", ["orcid_id"], unique=True)
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f("ix_contributors_orcid_id"), table_name="contributors")
    op.create_index("ix_contributors_orcid_id", "contributors", ["orcid_id"], unique=False)
    # ### end Alembic commands ###
diff --git a/src/mavedb/models/contributor.py b/src/mavedb/models/contributor.py
index f2046018..ada5bcd0 100644
--- a/src/mavedb/models/contributor.py
+++ b/src/mavedb/models/contributor.py
@@ -7,6 +7,6 @@ class Contributor(Base):
     __tablename__ = "contributors"

     id = Column(Integer, primary_key=True)
-    orcid_id = Column(String, index=True, nullable=False)
+    orcid_id = Column(String, index=True, nullable=False, unique=True)
     given_name = Column(String, nullable=True)
     family_name = Column(String, nullable=True)

From 9f297e86de1b757e3cda034f8d45f7e478d173f9 Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Fri, 20 Sep 2024 12:23:58 +1000
Subject: [PATCH 07/58] Remove duplicate tests.

---
 tests/routers/test_experiments.py | 30 ------------------------------
 1 file changed, 30 deletions(-)

diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py
index a72ab364..43049c37 100644
--- a/tests/routers/test_experiments.py
+++ b/tests/routers/test_experiments.py
@@ -119,36 +119,6 @@ def test_can_update_own_private_experiment_set(session, client, setup_router_db):
     assert response.json()["title"] == "Second Experiment"


-def test_can_update_own_private_experiment_short_description(session, client, setup_router_db):
-    experiment = create_experiment(client)
-    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
-    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "shortDescription": "New description"})
-    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
-    assert response.status_code == 200
-    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
-    assert response.json()["shortDescription"] == "New description"
-
-
-def test_can_update_own_private_experiment_abstract(session, client, setup_router_db):
-    experiment = create_experiment(client)
-    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
-    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "abstractText": "New abstract"})
-    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
-    assert response.status_code == 200
-    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
-    assert response.json()["abstractText"] == "New abstract"
-
-
-def test_can_update_own_private_experiment_method(session, client, setup_router_db):
-    experiment = create_experiment(client)
-    experiment_post_payload = deepcopy(TEST_MINIMAL_EXPERIMENT)
-    experiment_post_payload.update({"experimentSetUrn": experiment["experimentSetUrn"], "methodText": "New method"})
-    response = client.post("/api/v1/experiments/", json=experiment_post_payload)
-    assert response.status_code == 200
-    assert response.json()["experimentSetUrn"] == experiment["experimentSetUrn"]
-    assert response.json()["methodText"] == "New method"
-
-
 def test_cannot_update_other_users_private_experiment_set(session, client, setup_router_db):
     experiment = create_experiment(client)
     change_ownership(session, experiment["urn"], ExperimentDbModel)

From 59925e95909aaba9d4c372ee00588d06afbe238f Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Wed, 18 Sep 2024 16:17:49 -0700
Subject: [PATCH 08/58] Script for transforming accession variants to fully
 qualified

---
 ..._edit_transcripts_to_be_fully_qualified.py | 56 +++++++++++++++++++
 1 file changed, 56 insertions(+)
 create mode 100644 alembic/manual_migrations/migrate_single_target_endogenous_edit_transcripts_to_be_fully_qualified.py

diff --git a/alembic/manual_migrations/migrate_single_target_endogenous_edit_transcripts_to_be_fully_qualified.py b/alembic/manual_migrations/migrate_single_target_endogenous_edit_transcripts_to_be_fully_qualified.py
new file mode 100644
index 00000000..e9008dfb
--- /dev/null
+++ b/alembic/manual_migrations/migrate_single_target_endogenous_edit_transcripts_to_be_fully_qualified.py
@@ -0,0 +1,56 @@
import sqlalchemy as sa
from sqlalchemy.orm import Session, configure_mappers

from mavedb.models import *

from mavedb.models.score_set import ScoreSet
from mavedb.models.variant import Variant
from mavedb.models.target_gene import TargetGene
from mavedb.models.target_accession import TargetAccession

from mavedb.db.session import SessionLocal

configure_mappers()


def do_migration(db: Session):
    accession_based_score_sets = db.execute(
        sa.select(ScoreSet).join(TargetGene).where(TargetGene.accession_id.isnot(None))
    ).scalars()

    for score_set in accession_based_score_sets:
        total_targets = len(
            list(db.execute(sa.select(TargetGene).where(TargetGene.score_set_id == score_set.id)).scalars())
        )

        # Variants from score sets with multiple targets are already in the desired format.
        if total_targets > 1:
            continue

        target_accession = db.execute(
            sa.select(TargetAccession.accession).join(TargetGene).where(TargetGene.score_set_id == score_set.id)
        ).scalar()
        variants = db.execute(sa.select(Variant).where(Variant.score_set_id == score_set.id)).scalars()

        if target_accession is None:
            raise ValueError("target accession should never be None.")

        for variant in variants:
            if variant.hgvs_nt:
                variant.hgvs_nt = f"{target_accession}:{variant.hgvs_nt}"
            if variant.hgvs_pro:
                variant.hgvs_pro = f"{target_accession}:{variant.hgvs_pro}"
            if variant.hgvs_splice:
                variant.hgvs_splice = f"{target_accession}:{variant.hgvs_splice}"

            db.add(variant)


if __name__ == "__main__":
    db = SessionLocal()
    db.current_user = None  # type: ignore

    do_migration(db)

    db.commit()
    db.close()
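Because the script guards its work under __main__ and manages its own session, it would presumably be run directly rather than through alembic's revision machinery — something like the following, from the repository root with the database connection configured for SessionLocal (the exact invocation is an assumption, not stated in this series):

python alembic/manual_migrations/migrate_single_target_endogenous_edit_transcripts_to_be_fully_qualified.py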
From 810bb7187ad54fa0170de8a6628ec34cf6bb4c3b Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 20 Sep 2024 09:52:17 -0700
Subject: [PATCH 09/58] Treat Accession Based Variants as Fully Qualified

---
 src/mavedb/lib/validation/dataframe.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/src/mavedb/lib/validation/dataframe.py b/src/mavedb/lib/validation/dataframe.py
index 22414546..0ccf682f 100644
--- a/src/mavedb/lib/validation/dataframe.py
+++ b/src/mavedb/lib/validation/dataframe.py
@@ -454,9 +454,6 @@ def validate_hgvs_genomic_column(
     This function also validates all individual variants in the column and checks
     for agreement against the target sequence (for non-splice variants).

-    Implementation NOTE: We assume variants will only be presented as fully qualified (accession:variant)
-    if this column is being validated against multiple targets.
-
     Parameters
     ----------
     column : pd.Series
@@ -508,12 +505,9 @@ def validate_hgvs_genomic_column(
     for i, s in column.items():
         if s is not None:
             for variant in s.split(" "):
-                # Add accession info when we only have one target
-                if len(targets) == 1:
-                    s = f"{targets[0].accession}:{variant}"
                 try:
                     # We set strict to `False` to suppress validation warnings about intronic variants.
-                    vr.validate(hp.parse(s), strict=False)
+                    vr.validate(hp.parse(variant), strict=False)
                 except hgvs.exceptions.HGVSError as e:
                     invalid_variants.append(f"Failed to parse row {i} with HGVS exception: {e}")
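A quick sketch of the distinction this change enforces, using the same hgvs parser object the diff relies on (the accession here comes from this series' own test data in a later patch; treat the snippet as illustrative):

import hgvs.parser

hp = hgvs.parser.Parser()

# An unqualified variant has no reference accession, so the hgvs grammar
# rejects it — this raises an HGVSParseError:
# hp.parse("c.1G>C")

# A fully qualified variant parses cleanly, which is why the validator can
# now hand each token straight to hp.parse without prepending an accession.
var = hp.parse("NM_001637.3:c.1G>C")
print(var.ac, var.type)  # NM_001637.3 c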
From 6600940aa6b59f61fc73525d7ec2b918c6da850f Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 20 Sep 2024 10:06:20 -0700
Subject: [PATCH 10/58] Return Sub-Resources based on Permissions

---
 src/mavedb/routers/experiment_sets.py | 11 ++++++++++-
 src/mavedb/routers/experiments.py     | 13 ++++++-------
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/src/mavedb/routers/experiment_sets.py b/src/mavedb/routers/experiment_sets.py
index a5060ec9..eeaaf34c 100644
--- a/src/mavedb/routers/experiment_sets.py
+++ b/src/mavedb/routers/experiment_sets.py
@@ -6,6 +6,8 @@ from sqlalchemy.orm import Session

 from mavedb import deps
+from mavedb.lib.authentication import get_current_user, UserData
+from mavedb.lib.permissions import has_permission, Action
 from mavedb.lib.logging import LoggedRoute
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
 from mavedb.models.experiment_set import ExperimentSet
@@ -27,7 +29,9 @@
     response_model=experiment_set.ExperimentSet,
     responses={404: {}},
 )
-def fetch_experiment_set(*, urn: str, db: Session = Depends(deps.get_db)) -> Any:
+def fetch_experiment_set(
+    *, urn: str, db: Session = Depends(deps.get_db), user_data: UserData = Depends(get_current_user)
+) -> Any:
     """
     Fetch a single experiment set by URN.
     """
@@ -43,4 +47,9 @@
     else:
         item.experiments.sort(key=attrgetter("urn"))

+    has_permission(user_data, item, Action.READ)
+
+    # Filter experiment sub-resources to only those experiments readable by the requesting user.
+    item.experiments[:] = [exp for exp in item.experiments if has_permission(user_data, exp, Action.READ).permitted]
+
     return item
diff --git a/src/mavedb/routers/experiments.py b/src/mavedb/routers/experiments.py
index 08237c7e..986861f4 100644
--- a/src/mavedb/routers/experiments.py
+++ b/src/mavedb/routers/experiments.py
@@ -21,7 +21,7 @@
 )
 from mavedb.lib.logging import LoggedRoute
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
-from mavedb.lib.permissions import assert_permission, Action
+from mavedb.lib.permissions import assert_permission, has_permission, Action
 from mavedb.lib.validation.exceptions import ValidationError
 from mavedb.lib.validation.keywords import validate_keyword_list
 from mavedb.lib.keywords import search_keyword
@@ -174,16 +174,15 @@ def get_experiment_score_sets(
         db.query(ScoreSet).filter(ScoreSet.experiment_id == experiment.id).filter(~ScoreSet.superseding_score_set.has())
     )
     if user_data is not None:
-        score_set_result = score_sets.filter(
-            or_(
-                ScoreSet.private.is_(False),
-                and_(ScoreSet.private.is_(True), ScoreSet.created_by == user_data.user),
-            )
-        ).all()
+        score_set_result = score_sets.all()
     else:
         score_set_result = score_sets.filter(ScoreSet.private.is_(False)).all()
         logger.debug(msg="User is anonymous; Filtering only public score sets will be shown.", extra=logging_context())

+    score_set_result[:] = [
+        score_set for score_set in score_set_result if has_permission(user_data, score_set, Action.READ).permitted
+    ]
+
     if not score_set_result:
         save_to_logging_context({"associated_resources": []})
         logger.info(msg="No score sets are associated with the requested experiment.", extra=logging_context())

From ee2746d640187a363f8adcbf8e937e3e199110dc Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 20 Sep 2024 10:27:00 -0700
Subject: [PATCH 11/58] Test for Contributor Supplied UnPublished Score Set on
 Search

---
 tests/routers/test_experiments.py | 113 +++++++++++++++++++-----------
 1 file changed, 72 insertions(+), 41 deletions(-)

diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py
index b04d1625..51c29073 100644
--- a/tests/routers/test_experiments.py
+++ b/tests/routers/test_experiments.py

def test_cannot_create_experiment_that_keywords_has_wrong_combination1(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Endogenous locus library method",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
        {
            "keyword": {
                "key": "In Vitro Construct Library Method System",
                "value": "Oligo-directed mutagenic PCR",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]

    response = client.post("/api/v1/experiments/", json=experiment)
    assert response.status_code == 422
    response_data = response.json()
-    assert response_data["detail"] == \
-        "If 'Variant Library Creation Method' is 'Endogenous locus library method', both 'Endogenous Locus " \
-        "Library Method System' and 'Endogenous Locus Library Method Mechanism' must be present."
+    assert (
+        response_data["detail"]
+        == "If 'Variant Library Creation Method' is 'Endogenous locus library method', both 'Endogenous Locus "
+        "Library Method System' and 'Endogenous Locus Library Method Mechanism' must be present."
+    )


def test_cannot_create_experiment_that_keywords_has_wrong_combination2(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "In vitro construct library method",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
        {
            "keyword": {
                "key": "Endogenous Locus Library Method System",
                "value": "SaCas9",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]

    response = client.post("/api/v1/experiments/", json=experiment)
    assert response.status_code == 422
    response_data = response.json()
-    assert response_data["detail"] == \
-        "If 'Variant Library Creation Method' is 'In vitro construct library method', both 'In Vitro Construct " \
-        "Library Method System' and 'In Vitro Construct Library Method Mechanism' must be present."
+    assert (
+        response_data["detail"]
+        == "If 'Variant Library Creation Method' is 'In vitro construct library method', both 'In Vitro Construct "
+        "Library Method System' and 'In Vitro Construct Library Method Mechanism' must be present."
+    )


def test_cannot_create_experiment_that_keywords_has_wrong_combination3(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Other",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
-            "description": "Description"
+            "description": "Description",
        },
        {
            "keyword": {
                "key": "Endogenous Locus Library Method System",
                "value": "SaCas9",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]

    response = client.post("/api/v1/experiments/", json=experiment)
    assert response.status_code == 422
    response_data = response.json()
-    assert response_data["detail"] == \
-        "If 'Variant Library Creation Method' is 'Other', none of 'Endogenous Locus Library Method System', " \
-        "'Endogenous Locus Library Method Mechanism', 'In Vitro Construct Library Method System', or 'In Vitro " \
-        "Construct Library Method Mechanism' should be present."
+    assert (
+        response_data["detail"]
+        == "If 'Variant Library Creation Method' is 'Other', none of 'Endogenous Locus Library Method System', "
+        "'Endogenous Locus Library Method Mechanism', 'In Vitro Construct Library Method System', or 'In Vitro "
+        "Construct Library Method Mechanism' should be present."
+    )


def test_cannot_create_experiment_that_keywords_has_wrong_combination3(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Other",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
-            "description": "Description"
+            "description": "Description",
        },
        {
            "keyword": {
                "key": "In Vitro Construct Library Method System",
                "value": "Error-prone PCR",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]

    response = client.post("/api/v1/experiments/", json=experiment)
    assert response.status_code == 422
    response_data = response.json()
-    assert response_data["detail"] == \
-        "If 'Variant Library Creation Method' is 'Other', none of 'Endogenous Locus Library Method System', " \
-        "'Endogenous Locus Library Method Mechanism', 'In Vitro Construct Library Method System', or 'In Vitro " \
-        "Construct Library Method Mechanism' should be present."
+    assert (
+        response_data["detail"]
+        == "If 'Variant Library Creation Method' is 'Other', none of 'Endogenous Locus Library Method System', "
+        "'Endogenous Locus Library Method Mechanism', 'In Vitro Construct Library Method System', or 'In Vitro "
+        "Construct Library Method Mechanism' should be present."
+    )


def test_cannot_create_experiment_that_keyword_value_is_other_without_description(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Other",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
-            "description": None
+            "description": None,
        },
    ]

    response = client.post("/api/v1/experiments/", json=experiment)
    assert response.status_code == 422
    response_data = response.json()
-    error_messages = [error['msg'] for error in response_data["detail"]]
+    error_messages = [error["msg"] for error in response_data["detail"]]
    assert "Other option does not allow empty description." in error_messages


def test_cannot_create_experiment_that_keywords_have_duplicate_keys(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Other",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
-            "description": "Description"
+            "description": "Description",
        },
        {
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "In vitro construct library method",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]


def test_cannot_create_experiment_that_keywords_have_duplicate_values(client, setup_router_db):
            "keyword": {
                "key": "Delivery method",
                "value": "In vitro construct library method",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
        {
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "In vitro construct library method",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
        },
    ]


def test_create_experiment_that_keywords_have_duplicate_others(client, setup_router_db):
            "keyword": {
                "key": "Variant Library Creation Method",
                "value": "Other",
                "special": False,
-                "description": "Description"
+                "description": "Description",
            },
-            "description": "Description"
+            "description": "Description",
        },
        {
-            "keyword": {
-                "key": "Delivery method",
-                "value": "Other",
-                "special": False,
-                "description": "Description"
-            },
-            "description": "Description"
+            "keyword": {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"},
+            "description": "Description",
        },
    ]


def test_search_score_sets_for_experiments(session, client, setup_router_db, data_files, data_provider):
    assert response.json()[0]["urn"] == published_score_set["urn"]


+def test_search_score_sets_for_contributor_experiments(session, client, setup_router_db, data_files, data_provider):
+    experiment = create_experiment(client)
+    score_set_pub = create_seq_score_set_with_variants(
+        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
+    )
+    # make the unpublished score set owned by some other user. This shouldn't appear in the results.
+    score_set_unpub = create_seq_score_set(client, experiment["urn"], update={"title": "Unpublished Score Set"})
+    published_score_set = client.post(f"/api/v1/score-sets/{score_set_pub['urn']}/publish").json()
+    change_ownership(session, score_set_unpub["urn"], ScoreSetDbModel)
+    add_contributor(
+        session,
+        score_set_unpub["urn"],
+        ScoreSetDbModel,
+        TEST_USER["username"],
+        TEST_USER["first_name"],
+        TEST_USER["last_name"],
+    )
+
+    # On score set publication, the experiment will get a new urn
+    experiment_urn = published_score_set["experiment"]["urn"]
+    response = client.get(f"/api/v1/experiments/{experiment_urn}/score-sets")
+    assert response.status_code == 200
+    response_urns = [score_set["urn"] for score_set in response.json()]
+    assert len(response_urns) == 2
+    assert published_score_set["urn"] in response_urns
+    assert score_set_unpub["urn"] in response_urns
+
+
 def test_search_score_sets_for_my_experiments(session, client, setup_router_db, data_files, data_provider):
     experiment = create_experiment(client)
     score_set_pub = create_seq_score_set_with_variants(
From 38d4530d5e4ab41daa037ab404270edfea872c72 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 20 Sep 2024 10:42:22 -0700
Subject: [PATCH 12/58] Remove outdated branching logic and comment for
 permissions todo

---
 src/mavedb/routers/experiments.py | 18 +++++-------------
 1 file changed, 5 insertions(+), 13 deletions(-)

diff --git a/src/mavedb/routers/experiments.py b/src/mavedb/routers/experiments.py
index 986861f4..667e9764 100644
--- a/src/mavedb/routers/experiments.py
+++ b/src/mavedb/routers/experiments.py
@@ -165,20 +165,12 @@ def get_experiment_score_sets(

     assert_permission(user_data, experiment, Action.READ)

-    # If there is a current user with score sets associated with this experiment, return all of them. Otherwise, only show
-    # the public / published score sets.
-    #
-    # TODO(#182): A side effect of this implementation is that only the user who has created the experiment may view all the Score sets
-    # associated with a given experiment. This could be solved with user impersonation for certain user roles.
-    score_sets = (
-        db.query(ScoreSet).filter(ScoreSet.experiment_id == experiment.id).filter(~ScoreSet.superseding_score_set.has())
+    score_set_result = (
+        db.query(ScoreSet)
+        .filter(ScoreSet.experiment_id == experiment.id)
+        .filter(~ScoreSet.superseding_score_set.has())
+        .all()
     )
-    if user_data is not None:
-        score_set_result = score_sets.all()
-    else:
-        score_set_result = score_sets.filter(ScoreSet.private.is_(False)).all()
-        logger.debug(msg="User is anonymous; Filtering only public score sets will be shown.", extra=logging_context())
-
     score_set_result[:] = [
         score_set for score_set in score_set_result if has_permission(user_data, score_set, Action.READ).permitted
     ]

From 2acd3c0378aed07808c4b09b6f956de890dabb63 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 24 Sep 2024 14:27:31 -0700
Subject: [PATCH 13/58] Fixed: Pytest hangs on async test failures when
 session is not explicitly closed

---
 tests/conftest.py | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 954903d9..0c6feb89 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -48,13 +48,14 @@ def session(postgresql):
     )

     engine = create_engine(connection, echo=False, poolclass=NullPool)
-    session = sessionmaker(autocommit=False, autoflush=False, bind=engine)
+    session = sessionmaker(autocommit=False, autoflush=False, bind=engine)()

     Base.metadata.create_all(bind=engine)

     try:
-        yield session()
+        yield session
     finally:
+        session.close()
         Base.metadata.drop_all(bind=engine)

@@ -170,7 +171,14 @@ async def on_job(ctx):

 @pytest.fixture
 def standalone_worker_context(session, data_provider, arq_redis):
-    yield {"db": session, "hdp": data_provider, "state": {}, "job_id": "test_job", "redis": arq_redis, "pool": futures.ProcessPoolExecutor()}
+    yield {
+        "db": session,
+        "hdp": data_provider,
+        "state": {},
+        "job_id": "test_job",
+        "redis": arq_redis,
+        "pool": futures.ProcessPoolExecutor(),
+    }


 @pytest.fixture()

From 3d313161ad6ce0a272cc3f26729f790aecee3dd7 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 24 Sep 2024 14:35:04 -0700
Subject: [PATCH 14/58] Fully Qualified Accession Variants

---
 tests/routers/data/counts_acc.csv | 6 +++---
 tests/routers/data/scores_acc.csv | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/tests/routers/data/counts_acc.csv b/tests/routers/data/counts_acc.csv
index 72e9416f..ecb9ed36 100644
--- a/tests/routers/data/counts_acc.csv
+++ b/tests/routers/data/counts_acc.csv
@@ -1,4 +1,4 @@
 hgvs_nt,c_0,c_1
-c.1G>C,10,20
-c.2A>G,8,8
-c.6C>A,90,2
+NM_001637.3:c.1G>C,10,20
+NM_001637.3:c.2A>G,8,8
+NM_001637.3:c.6C>A,90,2
diff --git a/tests/routers/data/scores_acc.csv b/tests/routers/data/scores_acc.csv
index d82dcf54..30b0d836 100644
--- a/tests/routers/data/scores_acc.csv
+++ b/tests/routers/data/scores_acc.csv
@@ -1,4 +1,4 @@
 hgvs_nt,score
-c.1G>C,0.3
-c.2A>G,0.0
-c.6C>A,-1.65
+NM_001637.3:c.1G>C,0.3
+NM_001637.3:c.2A>G,0.0
+NM_001637.3:c.6C>A,-1.65

From 8ad9043776bf2e48db9cc0dcabbf27a95c7e585b Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 24 Sep 2024 14:35:26 -0700
Subject: [PATCH 15/58] Typo in Worker Variant Insertion Utility

---
 tests/helpers/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/helpers/util.py b/tests/helpers/util.py
index 7d8c9185..eb5046f7 100644
--- a/tests/helpers/util.py
+++ b/tests/helpers/util.py
@@ -126,7 +126,7 @@ def mock_worker_variant_insertion(client, db, data_provider, score_set, scores_c
     score_df = csv_data_to_df(score_file)

     if counts_csv_path is not None:
-        with open(scores_csv_path, "rb") as score_file:
+        with open(scores_csv_path, "rb") as counts_file:
             counts_df = csv_data_to_df(counts_file)
     else:
         counts_df = None
counts_csv_path is not None: - with open(scores_csv_path, "rb") as score_file: + with open(scores_csv_path, "rb") as counts_file: counts_df = csv_data_to_df(counts_file) else: counts_df = None From 532f2e9d246f93e26e60918f2beb20ecfdcc3e76 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Tue, 24 Sep 2024 14:35:52 -0700 Subject: [PATCH 16/58] Validation Updates to Account for Fully Qualified Accession Based Variants --- src/mavedb/lib/validation/dataframe.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/src/mavedb/lib/validation/dataframe.py b/src/mavedb/lib/validation/dataframe.py index 0ccf682f..b02e8258 100644 --- a/src/mavedb/lib/validation/dataframe.py +++ b/src/mavedb/lib/validation/dataframe.py @@ -202,12 +202,15 @@ def validate_dataframe(df: pd.DataFrame, kind: str, targets: list["TargetGene"], if df[column_mapping[c]].isna().all() and not is_index: continue + score_set_is_accession_based = all(target.target_accession for target in targets) + score_set_is_sequence_based = all(target.target_sequence for target in targets) + # This is typesafe, despite Pylance's claims otherwise - if all(target.target_accession for target in targets): + if score_set_is_accession_based and not score_set_is_sequence_based: validate_hgvs_genomic_column( df[column_mapping[c]], is_index, [target.target_accession for target in targets], hdp # type: ignore ) - elif all(target.target_sequence for target in targets): + elif score_set_is_sequence_based and not score_set_is_accession_based: validate_hgvs_transgenic_column( df[column_mapping[c]], is_index, {target.target_sequence.label: target.target_sequence for target in targets} # type: ignore ) @@ -215,7 +218,7 @@ def validate_dataframe(df: pd.DataFrame, kind: str, targets: list["TargetGene"], raise MixedTargetError("Could not validate dataframe against provided mixed target types.") # post validation, handle prefixes. 
We've already established these columns are non-null - if len(targets) > 1: + if score_set_is_accession_based or len(targets) > 1: prefixes[c] = ( df[column_mapping[c]].dropna()[0].split(" ")[0].split(":")[1][0] ) # Just take the first prefix, we validate consistency elsewhere @@ -374,7 +377,7 @@ def validate_hgvs_transgenic_column(column: pd.Series, is_index: bool, targets: valid_sequence_types = ("dna", "protein") validate_variant_column(column, is_index) prefixes = generate_variant_prefixes(column) - validate_variant_formatting(column, prefixes, list(targets.keys())) + validate_variant_formatting(column, prefixes, list(targets.keys()), len(targets) > 1) observed_sequence_types = [target.sequence_type for target in targets.values()] invalid_sequence_types = set(observed_sequence_types) - set(valid_sequence_types) @@ -479,7 +482,7 @@ def validate_hgvs_genomic_column( validate_variant_column(column, is_index) prefixes = generate_variant_prefixes(column) validate_variant_formatting( - column, prefixes, [target.accession for target in targets if target.accession is not None] + column, prefixes, [target.accession for target in targets if target.accession is not None], True ) # validate the individual variant strings @@ -518,7 +521,7 @@ def validate_hgvs_genomic_column( ) -def validate_variant_formatting(column: pd.Series, prefixes: list[str], targets: list[str]): +def validate_variant_formatting(column: pd.Series, prefixes: list[str], targets: list[str], fully_qualified: bool): """ Validate the formatting of HGVS variants present in the passed column against lists of prefixes and targets @@ -548,7 +551,7 @@ def validate_variant_formatting(column: pd.Series, prefixes: list[str], targets: variants = [variant for s in column.dropna() for variant in s.split(" ")] # if there is more than one target, we expect variants to be fully qualified - if len(targets) > 1: + if fully_qualified: if not all(len(str(v).split(":")) == 2 for v in variants): raise ValidationError( f"variant column '{column.name}' needs fully qualified coordinates when validating against multiple targets" From 27c172c6bdb3887a4a68f219e400c6136bdeab30 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Tue, 24 Sep 2024 14:44:56 -0700 Subject: [PATCH 17/58] Remove Async Test Commit Comments --- tests/worker/test_jobs.py | 40 --------------------------------------- 1 file changed, 40 deletions(-) diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index d4b6a1e6..edd8b028 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -158,10 +158,6 @@ async def test_create_variants_for_score_set_with_validation_error( assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors == validation_error - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) @@ -186,10 +182,6 @@ async def test_create_variants_for_score_set_with_caught_exception( assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors == {"detail": [], "exception": ""} - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. 
- session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) @@ -213,10 +205,6 @@ async def test_create_variants_for_score_set_with_caught_base_exception( assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors is None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) @@ -260,10 +248,6 @@ async def test_create_variants_for_score_set_with_existing_variants( assert score_set.processing_state == ProcessingState.success assert score_set.processing_errors is None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) @@ -313,10 +297,6 @@ async def test_create_variants_for_score_set_with_existing_exceptions( assert score_set.processing_state == ProcessingState.success assert score_set.processing_errors is None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) @@ -345,10 +325,6 @@ async def test_create_variants_for_score_set( assert len(db_variants) == 3 assert score_set.processing_state == ProcessingState.success - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - # NOTE: These tests operate under the assumption that mapping output is consistent between accession based and sequence based score sets. If # this assumption changes in the future, tests reflecting this difference in output should be added for accession based score sets. @@ -384,10 +360,6 @@ async def test_create_mapped_variants_for_scoreset( ).all() assert len(mapped_variants_for_score_set) == score_set.num_variants - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.skip @pytest.mark.asyncio @@ -433,10 +405,6 @@ async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants ).all() assert len(mapped_variants_for_score_set) == score_set.num_variants - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.skip @pytest.mark.asyncio @@ -468,10 +436,6 @@ async def awaitable_http_error(): ).all() assert len(mapped_variants_for_score_set) == 0 - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. 
- session.commit() - @pytest.mark.skip @pytest.mark.asyncio @@ -499,10 +463,6 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output( ).all() assert len(mapped_variants_for_score_set) == 0 - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - @pytest.mark.skip @pytest.mark.asyncio From cca72ccf17328a66bb72e0556f6a3c263a51370b Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Tue, 24 Sep 2024 15:09:59 -0700 Subject: [PATCH 18/58] Dataframe Test Updates for New Fully Qualified Variant Constraint --- tests/validation/test_dataframe.py | 59 ++++++++++++++++++------------ 1 file changed, 35 insertions(+), 24 deletions(-) diff --git a/tests/validation/test_dataframe.py b/tests/validation/test_dataframe.py index 5f807094..6216cbbc 100644 --- a/tests/validation/test_dataframe.py +++ b/tests/validation/test_dataframe.py @@ -515,34 +515,34 @@ def setUp(self) -> None: self.valid_targets = ["test1", "test2"] def test_single_target_valid_variants(self): - validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_target) + validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_target, False) def test_single_target_inconsistent_variants(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.inconsistent, self.valid_prefixes, self.valid_target) + validate_variant_formatting(self.inconsistent, self.valid_prefixes, self.valid_target, False) def test_single_target_invalid_prefixes(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid, self.invalid_prefixes, self.valid_target) + validate_variant_formatting(self.valid, self.invalid_prefixes, self.valid_target, False) def test_multi_target_valid_variants(self): - validate_variant_formatting(self.valid_multi, self.valid_prefixes, self.valid_targets) + validate_variant_formatting(self.valid_multi, self.valid_prefixes, self.valid_targets, True) def test_multi_target_inconsistent_variants(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.inconsistent_multi, self.valid_prefixes, self.valid_targets) + validate_variant_formatting(self.inconsistent_multi, self.valid_prefixes, self.valid_targets, True) def test_multi_target_invalid_prefixes(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid_multi, self.invalid_prefixes, self.valid_targets) + validate_variant_formatting(self.valid_multi, self.invalid_prefixes, self.valid_targets, True) def test_multi_target_lacking_full_coords(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_targets) + validate_variant_formatting(self.valid, self.valid_prefixes, self.valid_targets, True) def test_multi_target_invalid_accessions(self): with self.assertRaises(ValidationError): - validate_variant_formatting(self.invalid_multi, self.valid_prefixes, self.valid_targets) + validate_variant_formatting(self.invalid_multi, self.valid_prefixes, self.valid_targets, True) class TestGenerateVariantPrefixes(DfTestCase): @@ -910,27 +910,38 @@ def setUp(self): self.accession_test_case = AccessionTestCase() - self.valid_hgvs_column = pd.Series(["c.1G>A", "c.2A>T"], name=hgvs_nt_column) - self.missing_data = pd.Series(["c.3T>G", None], name=hgvs_nt_column) - self.duplicate_data = pd.Series(["c.4A>G", "c.4A>G"], name=hgvs_nt_column) + self.valid_hgvs_column = pd.Series( 
+ [f"{VALID_ACCESSION}:c.1G>A", f"{VALID_ACCESSION}:c.2A>T"], name=hgvs_nt_column + ) + self.missing_data = pd.Series([f"{VALID_ACCESSION}:c.3T>G", None], name=hgvs_nt_column) + self.duplicate_data = pd.Series([f"{VALID_ACCESSION}:c.4A>G", f"{VALID_ACCESSION}:c.4A>G"], name=hgvs_nt_column) self.invalid_hgvs_columns_by_name = [ - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_splice_column), - pd.Series(["g.1A>G", "g.1A>T"], name=hgvs_pro_column), - pd.Series(["c.1A>G", "c.1A>T"], name=hgvs_pro_column), - pd.Series(["n.1A>G", "n.1A>T"], name=hgvs_pro_column), - pd.Series(["p.Met1Val", "p.Met1Leu"], name=hgvs_nt_column), + pd.Series([f"{VALID_ACCESSION}:g.1A>G", f"{VALID_ACCESSION}:g.1A>T"], name=hgvs_splice_column), + pd.Series([f"{VALID_ACCESSION}:g.1A>G", f"{VALID_ACCESSION}:g.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:c.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_ACCESSION}:n.1A>G", f"{VALID_ACCESSION}:n.1A>T"], name=hgvs_pro_column), + pd.Series([f"{VALID_ACCESSION}:p.Met1Val", f"{VALID_ACCESSION}:p.Met1Leu"], name=hgvs_nt_column), ] self.invalid_hgvs_columns_by_contents = [ - pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_splice_column), # rna not allowed - pd.Series(["r.1a>g", "r.1a>u"], name=hgvs_nt_column), # rna not allowed - pd.Series(["c.1A>G", "c.5A>T"], name=hgvs_nt_column), # out of bounds for target - pd.Series(["c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant - pd.Series(["p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant - pd.Series(["n.1A>G", "c.1A>T"], name=hgvs_nt_column), # mixed prefix - pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # mixed types/prefix - pd.Series(["c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric + pd.Series( + [f"{VALID_ACCESSION}:r.1a>g", f"{VALID_ACCESSION}:r.1a>u"], name=hgvs_splice_column + ), # rna not allowed + pd.Series( + [f"{VALID_ACCESSION}:r.1a>g", f"{VALID_ACCESSION}:r.1a>u"], name=hgvs_nt_column + ), # rna not allowed + pd.Series( + [f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:c.5A>T"], name=hgvs_nt_column + ), # out of bounds for target + pd.Series([f"{VALID_ACCESSION}:c.1A>G", "_wt"], name=hgvs_nt_column), # old special variant + pd.Series([f"{VALID_ACCESSION}:p.Met1Leu", "_sy"], name=hgvs_pro_column), # old special variant + pd.Series([f"{VALID_ACCESSION}:n.1A>G", f"{VALID_ACCESSION}:c.1A>T"], name=hgvs_nt_column), # mixed prefix + pd.Series( + [f"{VALID_ACCESSION}:c.1A>G", f"{VALID_ACCESSION}:p.Met1Leu"], name=hgvs_pro_column + ), # mixed types/prefix + pd.Series(["c.1A>G", "p.Met1Leu"], name=hgvs_pro_column), # variants should be fully qualified + pd.Series([f"{VALID_ACCESSION}:c.1A>G", 2.5], name=hgvs_nt_column), # contains numeric pd.Series([1.0, 2.5], name=hgvs_nt_column), # contains numeric pd.Series([1.0, 2.5], name=hgvs_splice_column), # contains numeric pd.Series([1.0, 2.5], name=hgvs_pro_column), # contains numeric From 074c863524d8df601cb0e4b698af23d0ec4da55a Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Tue, 24 Sep 2024 15:10:35 -0700 Subject: [PATCH 19/58] Make Worker Variants Fully Qualified for Accession Tests --- tests/worker/data/counts_acc.csv | 6 +++--- tests/worker/data/scores_acc.csv | 6 +++--- tests/worker/test_jobs.py | 7 +++++-- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/tests/worker/data/counts_acc.csv b/tests/worker/data/counts_acc.csv index 72e9416f..ecb9ed36 100644 --- a/tests/worker/data/counts_acc.csv +++ b/tests/worker/data/counts_acc.csv @@ -1,4 +1,4 @@ hgvs_nt,c_0,c_1 
-c.1G>C,10,20
-c.2A>G,8,8
-c.6C>A,90,2
+NM_001637.3:c.1G>C,10,20
+NM_001637.3:c.2A>G,8,8
+NM_001637.3:c.6C>A,90,2
diff --git a/tests/worker/data/scores_acc.csv b/tests/worker/data/scores_acc.csv
index d82dcf54..30b0d836 100644
--- a/tests/worker/data/scores_acc.csv
+++ b/tests/worker/data/scores_acc.csv
@@ -1,4 +1,4 @@
 hgvs_nt,score
-c.1G>C,0.3
-c.2A>G,0.0
-c.6C>A,-1.65
+NM_001637.3:c.1G>C,0.3
+NM_001637.3:c.2A>G,0.0
+NM_001637.3:c.6C>A,-1.65
diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py
index edd8b028..b043dda9 100644
--- a/tests/worker/test_jobs.py
+++ b/tests/worker/test_jobs.py
@@ -35,6 +35,7 @@
     TEST_MINIMAL_EXPERIMENT,
     TEST_MINIMAL_SEQ_SCORESET,
     TEST_VARIANT_MAPPING_SCAFFOLD,
+    VALID_ACCESSION,
 )


@@ -130,8 +131,10 @@ async def test_create_variants_for_score_set_with_validation_error(
 ):
     score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set)

-    # This is invalid for both data sets.
-    scores.loc[:, HGVS_NT_COLUMN].iloc[0] = "c.1T>A"
+    if input_score_set == TEST_MINIMAL_SEQ_SCORESET:
+        scores.loc[:, HGVS_NT_COLUMN].iloc[0] = "c.1T>A"
+    else:
+        scores.loc[:, HGVS_NT_COLUMN].iloc[0] = f"{VALID_ACCESSION}:c.1T>A"

     with (
         patch.object(

From b073162ed036056cabb7f6ea465059dc4433668f Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 30 Sep 2024 10:49:23 -0700
Subject: [PATCH 20/58] Use Unchanging Identifiers for Worker Jobs

Previously, worker jobs used score set URNs to identify the resources on
which they should operate. Because these jobs run asynchronously and URNs
change when a score set is published, a URN was a poor identifier for work
whose execution may be deferred until after the identifier has changed.

This commit updates worker processes to identify the resources they operate
on by their internal database ids. This protects us from cases where the
URN changes and the worker process can no longer access the resource for
which it was spawned.
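
In sketch form, the new pattern looks roughly like this ("some_job" is a
stand-in name for any of the worker jobs, and ScoreSet is the ORM model the
worker code already uses; this is a simplified illustration, not the
verbatim implementation):

    from sqlalchemy import select

    async def some_job(ctx, correlation_id: str, score_set_id: int, updater_id: int):
        db = ctx["db"]
        # Resolve the row by its immutable primary key at execution time; the
        # URN is derived only now, so a publish that renames the score set
        # between enqueue and execution cannot orphan the job.
        score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one()
        assert score_set.urn, "A valid URN is needed to map this score set."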
--- src/mavedb/routers/score_sets.py | 4 +- src/mavedb/worker/jobs.py | 84 ++++++++++++++++---------------- tests/worker/test_jobs.py | 22 ++++++--- 3 files changed, 57 insertions(+), 53 deletions(-) diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index 44fd2a30..b5fb0dee 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -632,7 +632,7 @@ async def upload_score_set_variant_data( job = await worker.enqueue_job( "create_variants_for_score_set", correlation_id_for_context(), - item.urn, + item.id, user_data.user.id, scores_df, counts_df, @@ -871,7 +871,7 @@ async def update_score_set( job = await worker.enqueue_job( "create_variants_for_score_set", correlation_id_for_context(), - item.urn, + item.id, user_data.user.id, scores_data, count_data, diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index 66a6e82d..5fd6a25d 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -54,7 +54,7 @@ async def mapping_in_execution(redis: ArqRedis, job_id: str): await redis.set(MAPPING_CURRENT_ID_NAME, "") -def setup_job_state(ctx, invoker: int, resource: str, correlation_id: str): +def setup_job_state(ctx, invoker: int, resource: Optional[str], correlation_id: str): ctx["state"][ctx["job_id"]] = { "application": "mavedb-worker", "user": invoker, @@ -90,7 +90,7 @@ async def enqueue_job_with_backoff( async def create_variants_for_score_set( - ctx, correlation_id: str, score_set_urn: str, updater_id: int, scores: pd.DataFrame, counts: pd.DataFrame + ctx, correlation_id: str, score_set_id: int, updater_id: int, scores: pd.DataFrame, counts: pd.DataFrame ): """ Create variants for a score set. Intended to be run within a worker. @@ -99,14 +99,14 @@ async def create_variants_for_score_set( """ logging_context = {} try: - logging_context = setup_job_state(ctx, updater_id, score_set_urn, correlation_id) - logger.info(msg="Began processing of score set variants.", extra=logging_context) - db: Session = ctx["db"] hdp: RESTDataProvider = ctx["hdp"] redis: ArqRedis = ctx["redis"] + score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() + + logging_context = setup_job_state(ctx, updater_id, score_set.urn, correlation_id) + logger.info(msg="Began processing of score set variants.", extra=logging_context) - score_set = db.scalars(select(ScoreSet).where(ScoreSet.urn == score_set_urn)).one() updated_by = db.scalars(select(User).where(User.id == updater_id)).one() score_set.modified_by = updated_by @@ -210,8 +210,8 @@ async def create_variants_for_score_set( logging_context["processing_state"] = score_set.processing_state.name logger.info(msg="Finished creating variants in score set.", extra=logging_context) - await redis.lpush(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore - await redis.enqueue_job("variant_mapper_manager", correlation_id, score_set_urn, updater_id) + await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore + await redis.enqueue_job("variant_mapper_manager", correlation_id, score_set.id, updater_id) score_set.mapping_state = MappingState.queued finally: db.add(score_set) @@ -224,7 +224,7 @@ async def create_variants_for_score_set( async def map_variants_for_score_set( - ctx: dict, correlation_id: str, score_set_urn: str, updater_id: int, attempt: int = 1 + ctx: dict, correlation_id: str, score_set_id: int, updater_id: int, attempt: int = 1 ) -> dict: async with mapping_in_execution(redis=ctx["redis"], job_id=ctx["job_id"]): logging_context = {} @@ -232,24 +232,27 
@@ async def map_variants_for_score_set( try: db: Session = ctx["db"] redis: ArqRedis = ctx["redis"] + score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() - logging_context = setup_job_state(ctx, updater_id, score_set_urn, correlation_id) + logging_context = setup_job_state(ctx, updater_id, score_set.urn, correlation_id) logging_context["attempt"] = attempt logger.info(msg="Started variant mapping", extra=logging_context) - score_set = db.scalars(select(ScoreSet).where(ScoreSet.urn == score_set_urn)).one() score_set.mapping_state = MappingState.processing score_set.mapping_errors = null() db.add(score_set) db.commit() - logging_context["current_mapping_resource"] = score_set.urn + mapping_urn = score_set.urn + assert mapping_urn, "A valid URN is needed to map this score set." + + logging_context["current_mapping_resource"] = mapping_urn logging_context["mapping_state"] = score_set.mapping_state logger.debug(msg="Fetched score set metadata for mapping job.", extra=logging_context) # Do not block Worker event loop during mapping, see: https://arq-docs.helpmanual.io/#synchronous-jobs. vrs = vrs_mapper() - blocking = functools.partial(vrs.map_score_set, score_set_urn) + blocking = functools.partial(vrs.map_score_set, mapping_urn) loop = asyncio.get_running_loop() except Exception as e: @@ -292,13 +295,13 @@ async def map_variants_for_score_set( new_job_id = None max_retries_exceeded = None try: - await redis.lpush(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore + await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore new_job_id, max_retries_exceeded, backoff_time = await enqueue_job_with_backoff( - redis, "variant_mapper_manager", attempt, correlation_id, score_set_urn, updater_id + redis, "variant_mapper_manager", attempt, correlation_id, score_set.id, updater_id ) # If we fail to enqueue a mapping manager for this score set, evict it from the queue. 
if new_job_id is None: - await redis.lpop(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore + await redis.lpop(MAPPING_QUEUE_NAME, score_set.id) # type: ignore logging_context["backoff_limit_exceeded"] = max_retries_exceeded logging_context["backoff_deferred_in_seconds"] = backoff_time @@ -377,7 +380,7 @@ async def map_variants_for_score_set( .join(ScoreSet) .join(TargetSequence) .where( - ScoreSet.urn == str(score_set_urn), + ScoreSet.id == score_set_id, # TargetSequence.sequence == target_sequence, ) ).one() @@ -394,9 +397,7 @@ async def map_variants_for_score_set( }, JSONB, ) - target_gene.post_mapped_metadata = cast( - {"genomic": mapped_genomic_ref}, JSONB - ) + target_gene.post_mapped_metadata = cast({"genomic": mapped_genomic_ref}, JSONB) elif computed_protein_ref and mapped_protein_ref: pre_mapped_metadata = computed_protein_ref target_gene.pre_mapped_metadata = cast( @@ -408,9 +409,7 @@ async def map_variants_for_score_set( }, JSONB, ) - target_gene.post_mapped_metadata = cast( - {"protein": mapped_protein_ref}, JSONB - ) + target_gene.post_mapped_metadata = cast({"protein": mapped_protein_ref}, JSONB) else: raise NonexistentMappingReferenceError() @@ -486,13 +485,13 @@ async def map_variants_for_score_set( new_job_id = None max_retries_exceeded = None try: - await redis.lpush(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore + await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore new_job_id, max_retries_exceeded, backoff_time = await enqueue_job_with_backoff( - redis, "variant_mapper_manager", attempt, correlation_id, score_set_urn, updater_id + redis, "variant_mapper_manager", attempt, correlation_id, score_set.id, updater_id ) # If we fail to enqueue a mapping manager for this score set, evict it from the queue. if new_job_id is None: - await redis.lpop(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore + await redis.lpop(MAPPING_QUEUE_NAME, score_set.id) # type: ignore logging_context["backoff_limit_exceeded"] = max_retries_exceeded logging_context["backoff_deferred_in_seconds"] = backoff_time @@ -544,7 +543,7 @@ async def map_variants_for_score_set( async def variant_mapper_manager( - ctx: dict, correlation_id: str, score_set_urn: str, updater_id: int, attempt: int = 1 + ctx: dict, correlation_id: str, score_set_id: int, updater_id: int, attempt: int = 1 ) -> dict: logging_context = {} mapping_job_id = None @@ -552,25 +551,27 @@ async def variant_mapper_manager( try: redis: ArqRedis = ctx["redis"] db: Session = ctx["db"] + score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() - logging_context = setup_job_state(ctx, updater_id, score_set_urn, correlation_id) + logging_context = setup_job_state(ctx, updater_id, score_set.urn, correlation_id) logging_context["attempt"] = attempt logger.debug(msg="Variant mapping manager began execution", extra=logging_context) queue_length = await redis.llen(MAPPING_QUEUE_NAME) # type: ignore - queued_urn = await redis.rpop(MAPPING_QUEUE_NAME) # type: ignore + queued_id = await redis.rpop(MAPPING_QUEUE_NAME) # type: ignore logging_context["variant_mapping_queue_length"] = queue_length # Setup the job id cache if it does not already exist. 
if not await redis.exists(MAPPING_CURRENT_ID_NAME): await redis.set(MAPPING_CURRENT_ID_NAME, "") - if not queued_urn: + if not queued_id: logger.debug(msg="No mapping jobs exist in the queue.", extra=logging_context) return {"success": True, "enqueued_job": None} else: - queued_urn = queued_urn.decode("utf-8") - logging_context["current_mapping_resource"] = queued_urn + queued_id = queued_id.decode("utf-8") + queued_score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() + logging_context["current_mapping_resource"] = queued_score_set.urn logger.debug(msg="Found mapping job(s) still in queue.", extra=logging_context) mapping_job_id = await redis.get(MAPPING_CURRENT_ID_NAME) @@ -589,15 +590,12 @@ async def variant_mapper_manager( new_job = None new_job_id = None - score_set = None try: if not mapping_job_id or mapping_job_status in (JobStatus.not_found, JobStatus.complete): logger.debug(msg="No mapping jobs are running, queuing a new one.", extra=logging_context) - # NOTE: the score_set_urn provided to this function is only used for logging context; - # get the urn from the queue and pass that urn to map_variants_for_score_set new_job = await redis.enqueue_job( - "map_variants_for_score_set", correlation_id, queued_urn, updater_id, attempt + "map_variants_for_score_set", correlation_id, queued_score_set.id, updater_id, attempt ) if new_job: @@ -616,7 +614,7 @@ async def variant_mapper_manager( new_job = await redis.enqueue_job( "variant_mapper_manager", correlation_id, - score_set_urn, + score_set_id, updater_id, attempt, _defer_by=timedelta(minutes=5), @@ -624,7 +622,7 @@ async def variant_mapper_manager( if new_job: # Ensure this score set remains in the front of the queue. - queued_urn = await redis.rpush(MAPPING_QUEUE_NAME, score_set_urn) # type: ignore + queued_id = await redis.rpush(MAPPING_QUEUE_NAME, score_set_id) # type: ignore new_job_id = new_job.job_id logging_context["new_mapping_manager_job_id"] = new_job_id @@ -645,11 +643,11 @@ async def variant_mapper_manager( ) db.rollback() - score_set = db.scalars(select(ScoreSet).where(ScoreSet.urn == score_set_urn)).one_or_none() - if score_set: - score_set.mapping_state = MappingState.failed - score_set.mapping_errors = "Unable to queue a new mapping job or defer score set mapping." - db.add(score_set) + score_set_exc = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one_or_none() + if score_set_exc: + score_set_exc.mapping_state = MappingState.failed + score_set_exc.mapping_errors = "Unable to queue a new mapping job or defer score set mapping." + db.add(score_set_exc) db.commit() return {"success": False, "enqueued_job": new_job_id} diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index d4b6a1e6..15f684b2 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -129,6 +129,7 @@ async def test_create_variants_for_score_set_with_validation_error( input_score_set, validation_error, setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() # This is invalid for both data sets. 
scores.loc[:, HGVS_NT_COLUMN].iloc[0] = "c.1T>A" @@ -141,7 +142,7 @@ async def test_create_variants_for_score_set_with_validation_error( ) as hdp, ): success = await create_variants_for_score_set( - standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts + standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) # Call data provider _get_transcript method if this is an accession based score set, otherwise do not. @@ -169,12 +170,13 @@ async def test_create_variants_for_score_set_with_caught_exception( input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() # This is somewhat dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some exception will be raised no matter what in the async job. with (patch.object(pd.DataFrame, "isnull", side_effect=Exception) as mocked_exc,): success = await create_variants_for_score_set( - standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts + standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) mocked_exc.assert_called() @@ -197,12 +199,13 @@ async def test_create_variants_for_score_set_with_caught_base_exception( input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() # This is somewhat (extra) dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some base exception will be handled no matter what in the async job. with (patch.object(pd.DataFrame, "isnull", side_effect=BaseException),): success = await create_variants_for_score_set( - standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts + standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) db_variants = session.scalars(select(Variant)).all() @@ -224,12 +227,13 @@ async def test_create_variants_for_score_set_with_existing_variants( input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() with patch.object( cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT ) as hdp: success = await create_variants_for_score_set( - standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts + standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) # Call data provider _get_transcript method if this is an accession based score set, otherwise do not. 
@@ -249,7 +253,7 @@ async def test_create_variants_for_score_set_with_existing_variants(
         cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT
     ) as hdp:
         success = await create_variants_for_score_set(
-            standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts
+            standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts
         )

     db_variants = session.scalars(select(Variant)).all()
@@ -271,6 +275,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions(
     input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files
 ):
     score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set)
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one()

     # This is somewhat dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee
     # some exception will be raised no matter what in the async job.
@@ -280,7 +285,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions(
         ) as mocked_exc,
     ):
         success = await create_variants_for_score_set(
-            standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts
+            standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts
         )

     mocked_exc.assert_called()
@@ -296,7 +301,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions(
         cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT
     ) as hdp:
         success = await create_variants_for_score_set(
-            standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts
+            standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts
         )

     # Call data provider _get_transcript method if this is an accession based score set, otherwise do not.
@@ -324,12 +329,13 @@ async def test_create_variants_for_score_set(
     input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files
 ):
     score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set)
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one()

     with patch.object(
         cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT
     ) as hdp:
         success = await create_variants_for_score_set(
-            standalone_worker_context, uuid4().hex, score_set_urn, 1, scores, counts
+            standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts
         )

     # Call data provider _get_transcript method if this is an accession based score set, otherwise do not.

From e3145aa6dead549af46689af01c03045ca3f9a69 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 30 Sep 2024 11:00:37 -0700
Subject: [PATCH 21/58] Make Mapping Manager Agnostic to Score Set

Providing a score set to the mapping manager implied that it was managing
the passed score set, when it was really managing the head of the mapping
queue. This commit eliminates that argument so the manager's signature
matches the job's actual purpose.
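
Schematically, the manager now derives its target from the queue itself
rather than from an argument (a condensed sketch of the flow in the diff
below, not the verbatim implementation):

    # Pop whatever score set id sits at the head of the Redis mapping queue
    # and operate on that row; no id is passed into the manager anymore.
    queued_id = await redis.rpop(MAPPING_QUEUE_NAME)  # type: ignore
    if not queued_id:
        return {"success": True, "enqueued_job": None}  # nothing to manage
    queued_score_set = db.scalars(
        select(ScoreSet).where(ScoreSet.id == int(queued_id.decode("utf-8")))
    ).one()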
--- src/mavedb/worker/jobs.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index 5fd6a25d..39423648 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -211,7 +211,7 @@ async def create_variants_for_score_set( logger.info(msg="Finished creating variants in score set.", extra=logging_context) await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore - await redis.enqueue_job("variant_mapper_manager", correlation_id, score_set.id, updater_id) + await redis.enqueue_job("variant_mapper_manager", correlation_id, updater_id) score_set.mapping_state = MappingState.queued finally: db.add(score_set) @@ -297,7 +297,7 @@ async def map_variants_for_score_set( try: await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore new_job_id, max_retries_exceeded, backoff_time = await enqueue_job_with_backoff( - redis, "variant_mapper_manager", attempt, correlation_id, score_set.id, updater_id + redis, "variant_mapper_manager", attempt, correlation_id, updater_id ) # If we fail to enqueue a mapping manager for this score set, evict it from the queue. if new_job_id is None: @@ -487,7 +487,7 @@ async def map_variants_for_score_set( try: await redis.lpush(MAPPING_QUEUE_NAME, score_set.id) # type: ignore new_job_id, max_retries_exceeded, backoff_time = await enqueue_job_with_backoff( - redis, "variant_mapper_manager", attempt, correlation_id, score_set.id, updater_id + redis, "variant_mapper_manager", attempt, correlation_id, updater_id ) # If we fail to enqueue a mapping manager for this score set, evict it from the queue. if new_job_id is None: @@ -542,18 +542,16 @@ async def map_variants_for_score_set( return {"success": True} -async def variant_mapper_manager( - ctx: dict, correlation_id: str, score_set_id: int, updater_id: int, attempt: int = 1 -) -> dict: +async def variant_mapper_manager(ctx: dict, correlation_id: str, updater_id: int, attempt: int = 1) -> dict: logging_context = {} mapping_job_id = None mapping_job_status = None + queued_score_set = None try: redis: ArqRedis = ctx["redis"] db: Session = ctx["db"] - score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() - logging_context = setup_job_state(ctx, updater_id, score_set.urn, correlation_id) + logging_context = setup_job_state(ctx, updater_id, None, correlation_id) logging_context["attempt"] = attempt logger.debug(msg="Variant mapping manager began execution", extra=logging_context) @@ -570,8 +568,9 @@ async def variant_mapper_manager( return {"success": True, "enqueued_job": None} else: queued_id = queued_id.decode("utf-8") - queued_score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one() - logging_context["current_mapping_resource"] = queued_score_set.urn + queued_score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == queued_id)).one() + + logging_context["upcoming_mapping_resource"] = queued_score_set.urn logger.debug(msg="Found mapping job(s) still in queue.", extra=logging_context) mapping_job_id = await redis.get(MAPPING_CURRENT_ID_NAME) @@ -614,7 +613,6 @@ async def variant_mapper_manager( new_job = await redis.enqueue_job( "variant_mapper_manager", correlation_id, - score_set_id, updater_id, attempt, _defer_by=timedelta(minutes=5), @@ -622,7 +620,7 @@ async def variant_mapper_manager( if new_job: # Ensure this score set remains in the front of the queue. 
- queued_id = await redis.rpush(MAPPING_QUEUE_NAME, score_set_id) # type: ignore + queued_id = await redis.rpush(MAPPING_QUEUE_NAME, queued_score_set.id) # type: ignore new_job_id = new_job.job_id logging_context["new_mapping_manager_job_id"] = new_job_id @@ -643,7 +641,12 @@ async def variant_mapper_manager( ) db.rollback() - score_set_exc = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one_or_none() + + # We shouldn't rely on the passed score set id matching the score set we are operating upon. + if not queued_score_set: + return {"success": False, "enqueued_job": new_job_id} + + score_set_exc = db.scalars(select(ScoreSet).where(ScoreSet.id == queued_score_set.id)).one_or_none() if score_set_exc: score_set_exc.mapping_state = MappingState.failed score_set_exc.mapping_errors = "Unable to queue a new mapping job or defer score set mapping." From da60aee378f2764b2cad3697d16c456a253d9432 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Mon, 30 Sep 2024 15:18:11 -0700 Subject: [PATCH 22/58] Refresh Some Dependencies to Resolve High-sev Dependabot Alerts --- poetry.lock | 1490 +++++++++++++++++++++++++----------------------- pyproject.toml | 8 +- 2 files changed, 773 insertions(+), 725 deletions(-) diff --git a/poetry.lock b/poetry.lock index 872b5cfd..a481313c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -20,13 +20,13 @@ tz = ["python-dateutil"] [[package]] name = "anyio" -version = "4.4.0" +version = "4.6.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "anyio-4.4.0-py3-none-any.whl", hash = "sha256:c1b2d8f46a8a812513012e1107cb0e68c17159a7a594208005a57dc776e1bdc7"}, - {file = "anyio-4.4.0.tar.gz", hash = "sha256:5aadc6a1bbb7cdb0bede386cac5e2940f5e2ff3aa20277e991cf028e0585ce94"}, + {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, + {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, ] [package.dependencies] @@ -36,9 +36,9 @@ sniffio = ">=1.1" typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] -doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] -trio = ["trio (>=0.23)"] +doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +trio = ["trio (>=0.26.1)"] [[package]] name = "arq" @@ -88,16 +88,6 @@ files = [ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, ] -[[package]] -name = "atomicwrites" -version = "1.4.1" -description = "Atomic file writes." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -files = [ - {file = "atomicwrites-1.4.1.tar.gz", hash = "sha256:81b2c9071a49367a7f770170e5eec8cb66567cfbbc8c73d20ce5ca4a8d71cf11"}, -] - [[package]] name = "attrs" version = "24.2.0" @@ -119,21 +109,18 @@ tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] [[package]] name = "authlib" -version = "0.15.6" -description = "The ultimate Python library in building OAuth and OpenID Connect servers." +version = "1.3.2" +description = "The ultimate Python library in building OAuth and OpenID Connect servers and clients." optional = true -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "Authlib-0.15.6-py2.py3-none-any.whl", hash = "sha256:6de4508ba8125e438a35bcd910d55df7087dccd3dd8517095c2bd9853c372ec1"}, - {file = "Authlib-0.15.6.tar.gz", hash = "sha256:2988fdf7d0a5c416f5a37ca4b1e7cee360094940229bc97909aed25880326c72"}, + {file = "Authlib-1.3.2-py2.py3-none-any.whl", hash = "sha256:ede026a95e9f5cdc2d4364a52103f5405e75aa156357e831ef2bfd0bc5094dfc"}, + {file = "authlib-1.3.2.tar.gz", hash = "sha256:4b16130117f9eb82aa6eec97f6dd4673c3f960ac0283ccdae2897ee4bc030ba2"}, ] [package.dependencies] cryptography = "*" -[package.extras] -client = ["requests"] - [[package]] name = "beautifulsoup4" version = "4.12.3" @@ -279,17 +266,17 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.34.159" +version = "1.34.162" description = "The AWS SDK for Python" optional = true python-versions = ">=3.8" files = [ - {file = "boto3-1.34.159-py3-none-any.whl", hash = "sha256:21120d23cc37c0e80dc4f64434bc5664d2a5645dcd9bf8a8fa97ed5c82164ca0"}, - {file = "boto3-1.34.159.tar.gz", hash = "sha256:ffe7bbb88ba81b5d54bc8fa0cfb2f3b7fe63a6cffa0f9207df2ef5c22a1c0587"}, + {file = "boto3-1.34.162-py3-none-any.whl", hash = "sha256:d6f6096bdab35a0c0deff469563b87d184a28df7689790f7fe7be98502b7c590"}, + {file = "boto3-1.34.162.tar.gz", hash = "sha256:873f8f5d2f6f85f1018cbb0535b03cceddc7b655b61f66a0a56995238804f41f"}, ] [package.dependencies] -botocore = ">=1.34.159,<1.35.0" +botocore = ">=1.34.162,<1.35.0" jmespath = ">=0.7.1,<2.0.0" s3transfer = ">=0.10.0,<0.11.0" @@ -298,13 +285,13 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "boto3-stubs" -version = "1.34.159" -description = "Type annotations for boto3 1.34.159 generated with mypy-boto3-builder 7.25.3" +version = "1.34.162" +description = "Type annotations for boto3 1.34.162 generated with mypy-boto3-builder 7.26.0" optional = false python-versions = ">=3.8" files = [ - {file = "boto3_stubs-1.34.159-py3-none-any.whl", hash = "sha256:9d5b69db82dbc55fce3d58f5d39d0680ca5334d7df596447c29357b9a0669b09"}, - {file = "boto3_stubs-1.34.159.tar.gz", hash = "sha256:d3a298f3b0a3a7758c2bfb8585089e18567c1eee695f939439ef0023e84b32ce"}, + {file = "boto3_stubs-1.34.162-py3-none-any.whl", hash = "sha256:47c651272782a2e894082087eeaeb87a7e809e7e282748560cf39c155031abef"}, + {file = "boto3_stubs-1.34.162.tar.gz", hash = "sha256:6d60b7b9652e1c99f3caba00779e1b94ba7062b0431147a00543af8b1f5252f4"}, ] [package.dependencies] @@ -355,7 +342,7 @@ bedrock-agent = ["mypy-boto3-bedrock-agent (>=1.34.0,<1.35.0)"] bedrock-agent-runtime = ["mypy-boto3-bedrock-agent-runtime (>=1.34.0,<1.35.0)"] bedrock-runtime = ["mypy-boto3-bedrock-runtime (>=1.34.0,<1.35.0)"] billingconductor = ["mypy-boto3-billingconductor (>=1.34.0,<1.35.0)"] -boto3 = ["boto3 (==1.34.159)", "botocore (==1.34.159)"] +boto3 = ["boto3 (==1.34.162)", "botocore (==1.34.162)"] braket = ["mypy-boto3-braket 
(>=1.34.0,<1.35.0)"] budgets = ["mypy-boto3-budgets (>=1.34.0,<1.35.0)"] ce = ["mypy-boto3-ce (>=1.34.0,<1.35.0)"] @@ -705,13 +692,13 @@ xray = ["mypy-boto3-xray (>=1.34.0,<1.35.0)"] [[package]] name = "botocore" -version = "1.34.159" +version = "1.34.162" description = "Low-level, data-driven core of boto 3." optional = true python-versions = ">=3.8" files = [ - {file = "botocore-1.34.159-py3-none-any.whl", hash = "sha256:7633062491457419a49f5860c014251ae85689f78266a3ce020c2c8688a76b97"}, - {file = "botocore-1.34.159.tar.gz", hash = "sha256:dc28806eb21e3c8d690c422530dff8b4b242ac033cbe98f160a9d37796c09cb1"}, + {file = "botocore-1.34.162-py3-none-any.whl", hash = "sha256:2d918b02db88d27a75b48275e6fb2506e9adaaddbec1ffa6a8a0898b34e769be"}, + {file = "botocore-1.34.162.tar.gz", hash = "sha256:adc23be4fb99ad31961236342b7cbf3c0bfc62532cd02852196032e8c0d682f3"}, ] [package.dependencies] @@ -727,13 +714,13 @@ crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" -version = "1.34.159" +version = "1.35.29" description = "Type annotations and code completion for botocore" optional = false -python-versions = "<4.0,>=3.8" +python-versions = ">=3.8" files = [ - {file = "botocore_stubs-1.34.159-py3-none-any.whl", hash = "sha256:b2bf4ff8dd4a39556f6338bdc7e75485282dd7e2c0b7dacdcfc219352c7865ea"}, - {file = "botocore_stubs-1.34.159.tar.gz", hash = "sha256:f9f51612960c0fa1b01638816ee3d1ca031f9efc3e1328e4f4d52253b573067a"}, + {file = "botocore_stubs-1.35.29-py3-none-any.whl", hash = "sha256:8eff9dc4e6e844baf65beb16eb2c68a173ccd50dc9323dc04d85060cacc36a05"}, + {file = "botocore_stubs-1.35.29.tar.gz", hash = "sha256:40d4cf5fc527fbad381be18cf837400d6f168a880e26ee794c8c04fa0a3e62c5"}, ] [package.dependencies] @@ -759,91 +746,108 @@ intervaltree = "*" lazy = "*" requests = "*" +[[package]] +name = "cdot" +version = "0.2.26" +description = "Transcripts for HGVS libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "cdot-0.2.26-py3-none-any.whl", hash = "sha256:f9f6c3dbdb9dffda3779e77d9acef33ae3111c11a4de18fba5ff1d77cbc83c00"}, + {file = "cdot-0.2.26.tar.gz", hash = "sha256:6f9b9fb4076722f5d92d189fa4ef5a7e2af1cdd4f790068bb7d9a5d3ba73921b"}, +] + +[package.dependencies] +bioutils = ">=0.5.8" +intervaltree = "*" +lazy = "*" +requests = "*" + [[package]] name = "certifi" -version = "2024.7.4" +version = "2024.8.30" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, - {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, + {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, + {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, ] [[package]] name = "cffi" -version = "1.17.0" +version = "1.17.1" description = "Foreign Function Interface for Python calling C code." 
optional = true python-versions = ">=3.8" files = [ - {file = "cffi-1.17.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9338cc05451f1942d0d8203ec2c346c830f8e86469903d5126c1f0a13a2bcbb"}, - {file = "cffi-1.17.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a0ce71725cacc9ebf839630772b07eeec220cbb5f03be1399e0457a1464f8e1a"}, - {file = "cffi-1.17.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c815270206f983309915a6844fe994b2fa47e5d05c4c4cef267c3b30e34dbe42"}, - {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6bdcd415ba87846fd317bee0774e412e8792832e7805938987e4ede1d13046d"}, - {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a98748ed1a1df4ee1d6f927e151ed6c1a09d5ec21684de879c7ea6aa96f58f2"}, - {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0a048d4f6630113e54bb4b77e315e1ba32a5a31512c31a273807d0027a7e69ab"}, - {file = "cffi-1.17.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24aa705a5f5bd3a8bcfa4d123f03413de5d86e497435693b638cbffb7d5d8a1b"}, - {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:856bf0924d24e7f93b8aee12a3a1095c34085600aa805693fb7f5d1962393206"}, - {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:4304d4416ff032ed50ad6bb87416d802e67139e31c0bde4628f36a47a3164bfa"}, - {file = "cffi-1.17.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:331ad15c39c9fe9186ceaf87203a9ecf5ae0ba2538c9e898e3a6967e8ad3db6f"}, - {file = "cffi-1.17.0-cp310-cp310-win32.whl", hash = "sha256:669b29a9eca6146465cc574659058ed949748f0809a2582d1f1a324eb91054dc"}, - {file = "cffi-1.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:48b389b1fd5144603d61d752afd7167dfd205973a43151ae5045b35793232aa2"}, - {file = "cffi-1.17.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c5d97162c196ce54af6700949ddf9409e9833ef1003b4741c2b39ef46f1d9720"}, - {file = "cffi-1.17.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5ba5c243f4004c750836f81606a9fcb7841f8874ad8f3bf204ff5e56332b72b9"}, - {file = "cffi-1.17.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bb9333f58fc3a2296fb1d54576138d4cf5d496a2cc118422bd77835e6ae0b9cb"}, - {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:435a22d00ec7d7ea533db494da8581b05977f9c37338c80bc86314bec2619424"}, - {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1df34588123fcc88c872f5acb6f74ae59e9d182a2707097f9e28275ec26a12d"}, - {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:df8bb0010fdd0a743b7542589223a2816bdde4d94bb5ad67884348fa2c1c67e8"}, - {file = "cffi-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8b5b9712783415695663bd463990e2f00c6750562e6ad1d28e072a611c5f2a6"}, - {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffef8fd58a36fb5f1196919638f73dd3ae0db1a878982b27a9a5a176ede4ba91"}, - {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4e67d26532bfd8b7f7c05d5a766d6f437b362c1bf203a3a5ce3593a645e870b8"}, - {file = "cffi-1.17.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:45f7cd36186db767d803b1473b3c659d57a23b5fa491ad83c6d40f2af58e4dbb"}, - {file = "cffi-1.17.0-cp311-cp311-win32.whl", hash = 
"sha256:a9015f5b8af1bb6837a3fcb0cdf3b874fe3385ff6274e8b7925d81ccaec3c5c9"}, - {file = "cffi-1.17.0-cp311-cp311-win_amd64.whl", hash = "sha256:b50aaac7d05c2c26dfd50c3321199f019ba76bb650e346a6ef3616306eed67b0"}, - {file = "cffi-1.17.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aec510255ce690d240f7cb23d7114f6b351c733a74c279a84def763660a2c3bc"}, - {file = "cffi-1.17.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2770bb0d5e3cc0e31e7318db06efcbcdb7b31bcb1a70086d3177692a02256f59"}, - {file = "cffi-1.17.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:db9a30ec064129d605d0f1aedc93e00894b9334ec74ba9c6bdd08147434b33eb"}, - {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a47eef975d2b8b721775a0fa286f50eab535b9d56c70a6e62842134cf7841195"}, - {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3e0992f23bbb0be00a921eae5363329253c3b86287db27092461c887b791e5e"}, - {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6107e445faf057c118d5050560695e46d272e5301feffda3c41849641222a828"}, - {file = "cffi-1.17.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb862356ee9391dc5a0b3cbc00f416b48c1b9a52d252d898e5b7696a5f9fe150"}, - {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c1c13185b90bbd3f8b5963cd8ce7ad4ff441924c31e23c975cb150e27c2bf67a"}, - {file = "cffi-1.17.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:17c6d6d3260c7f2d94f657e6872591fe8733872a86ed1345bda872cfc8c74885"}, - {file = "cffi-1.17.0-cp312-cp312-win32.whl", hash = "sha256:c3b8bd3133cd50f6b637bb4322822c94c5ce4bf0d724ed5ae70afce62187c492"}, - {file = "cffi-1.17.0-cp312-cp312-win_amd64.whl", hash = "sha256:dca802c8db0720ce1c49cce1149ff7b06e91ba15fa84b1d59144fef1a1bc7ac2"}, - {file = "cffi-1.17.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6ce01337d23884b21c03869d2f68c5523d43174d4fc405490eb0091057943118"}, - {file = "cffi-1.17.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cab2eba3830bf4f6d91e2d6718e0e1c14a2f5ad1af68a89d24ace0c6b17cced7"}, - {file = "cffi-1.17.0-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14b9cbc8f7ac98a739558eb86fabc283d4d564dafed50216e7f7ee62d0d25377"}, - {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b00e7bcd71caa0282cbe3c90966f738e2db91e64092a877c3ff7f19a1628fdcb"}, - {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:41f4915e09218744d8bae14759f983e466ab69b178de38066f7579892ff2a555"}, - {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e4760a68cab57bfaa628938e9c2971137e05ce48e762a9cb53b76c9b569f1204"}, - {file = "cffi-1.17.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:011aff3524d578a9412c8b3cfaa50f2c0bd78e03eb7af7aa5e0df59b158efb2f"}, - {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:a003ac9edc22d99ae1286b0875c460351f4e101f8c9d9d2576e78d7e048f64e0"}, - {file = "cffi-1.17.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ef9528915df81b8f4c7612b19b8628214c65c9b7f74db2e34a646a0a2a0da2d4"}, - {file = "cffi-1.17.0-cp313-cp313-win32.whl", hash = "sha256:70d2aa9fb00cf52034feac4b913181a6e10356019b18ef89bc7c12a283bf5f5a"}, - {file = "cffi-1.17.0-cp313-cp313-win_amd64.whl", 
hash = "sha256:b7b6ea9e36d32582cda3465f54c4b454f62f23cb083ebc7a94e2ca6ef011c3a7"}, - {file = "cffi-1.17.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:964823b2fc77b55355999ade496c54dde161c621cb1f6eac61dc30ed1b63cd4c"}, - {file = "cffi-1.17.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:516a405f174fd3b88829eabfe4bb296ac602d6a0f68e0d64d5ac9456194a5b7e"}, - {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dec6b307ce928e8e112a6bb9921a1cb00a0e14979bf28b98e084a4b8a742bd9b"}, - {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4094c7b464cf0a858e75cd14b03509e84789abf7b79f8537e6a72152109c76e"}, - {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2404f3de742f47cb62d023f0ba7c5a916c9c653d5b368cc966382ae4e57da401"}, - {file = "cffi-1.17.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa9d43b02a0c681f0bfbc12d476d47b2b2b6a3f9287f11ee42989a268a1833c"}, - {file = "cffi-1.17.0-cp38-cp38-win32.whl", hash = "sha256:0bb15e7acf8ab35ca8b24b90af52c8b391690ef5c4aec3d31f38f0d37d2cc499"}, - {file = "cffi-1.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:93a7350f6706b31f457c1457d3a3259ff9071a66f312ae64dc024f049055f72c"}, - {file = "cffi-1.17.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1a2ddbac59dc3716bc79f27906c010406155031a1c801410f1bafff17ea304d2"}, - {file = "cffi-1.17.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6327b572f5770293fc062a7ec04160e89741e8552bf1c358d1a23eba68166759"}, - {file = "cffi-1.17.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbc183e7bef690c9abe5ea67b7b60fdbca81aa8da43468287dae7b5c046107d4"}, - {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bdc0f1f610d067c70aa3737ed06e2726fd9d6f7bfee4a351f4c40b6831f4e82"}, - {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6d872186c1617d143969defeadac5a904e6e374183e07977eedef9c07c8953bf"}, - {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0d46ee4764b88b91f16661a8befc6bfb24806d885e27436fdc292ed7e6f6d058"}, - {file = "cffi-1.17.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f76a90c345796c01d85e6332e81cab6d70de83b829cf1d9762d0a3da59c7932"}, - {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e60821d312f99d3e1569202518dddf10ae547e799d75aef3bca3a2d9e8ee693"}, - {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:eb09b82377233b902d4c3fbeeb7ad731cdab579c6c6fda1f763cd779139e47c3"}, - {file = "cffi-1.17.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:24658baf6224d8f280e827f0a50c46ad819ec8ba380a42448e24459daf809cf4"}, - {file = "cffi-1.17.0-cp39-cp39-win32.whl", hash = "sha256:0fdacad9e0d9fc23e519efd5ea24a70348305e8d7d85ecbb1a5fa66dc834e7fb"}, - {file = "cffi-1.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:7cbc78dc018596315d4e7841c8c3a7ae31cc4d638c9b627f87d52e8abaaf2d29"}, - {file = "cffi-1.17.0.tar.gz", hash = "sha256:f3157624b7558b914cb039fd1af735e5e8049a87c817cc215109ad1c8779df76"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = 
"cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -1003,62 +1007,66 @@ cron = ["capturer (>=2.4)"] [[package]] name = "configparser" -version = "7.0.0" +version = "7.1.0" description = "Updated configparser from stdlib for earlier Pythons." 
optional = false python-versions = ">=3.8" files = [ - {file = "configparser-7.0.0-py3-none-any.whl", hash = "sha256:f46d52a12811c637104c6bb8eb33693be0038ab6bf01d69aae009c39ec8c2017"}, - {file = "configparser-7.0.0.tar.gz", hash = "sha256:af3c618a67aaaedc4d689fd7317d238f566b9aa03cae50102e92d7f0dfe78ba0"}, + {file = "configparser-7.1.0-py3-none-any.whl", hash = "sha256:98e374573c4e10e92399651e3ba1c47a438526d633c44ee96143dec26dad4299"}, + {file = "configparser-7.1.0.tar.gz", hash = "sha256:eb82646c892dbdf773dae19c633044d163c3129971ae09b49410a303b8e0a5f7"}, ] [package.extras] -docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -testing = ["pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "types-backports"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "types-backports"] [[package]] name = "cryptography" -version = "41.0.7" +version = "43.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." optional = true python-versions = ">=3.7" files = [ - {file = "cryptography-41.0.7-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:3c78451b78313fa81607fa1b3f1ae0a5ddd8014c38a02d9db0616133987b9cdf"}, - {file = "cryptography-41.0.7-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:928258ba5d6f8ae644e764d0f996d61a8777559f72dfeb2eea7e2fe0ad6e782d"}, - {file = "cryptography-41.0.7-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a1b41bc97f1ad230a41657d9155113c7521953869ae57ac39ac7f1bb471469a"}, - {file = "cryptography-41.0.7-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:841df4caa01008bad253bce2a6f7b47f86dc9f08df4b433c404def869f590a15"}, - {file = "cryptography-41.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5429ec739a29df2e29e15d082f1d9ad683701f0ec7709ca479b3ff2708dae65a"}, - {file = "cryptography-41.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:43f2552a2378b44869fe8827aa19e69512e3245a219104438692385b0ee119d1"}, - {file = "cryptography-41.0.7-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:af03b32695b24d85a75d40e1ba39ffe7db7ffcb099fe507b39fd41a565f1b157"}, - {file = "cryptography-41.0.7-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:49f0805fc0b2ac8d4882dd52f4a3b935b210935d500b6b805f321addc8177406"}, - {file = "cryptography-41.0.7-cp37-abi3-win32.whl", hash = "sha256:f983596065a18a2183e7f79ab3fd4c475205b839e02cbc0efbbf9666c4b3083d"}, - {file = "cryptography-41.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:90452ba79b8788fa380dfb587cca692976ef4e757b194b093d845e8d99f612f2"}, - {file = "cryptography-41.0.7-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:079b85658ea2f59c4f43b70f8119a52414cdb7be34da5d019a77bf96d473b960"}, - {file = "cryptography-41.0.7-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:b640981bf64a3e978a56167594a0e97db71c89a479da8e175d8bb5be5178c003"}, - {file = "cryptography-41.0.7-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e3114da6d7f95d2dee7d3f4eec16dacff819740bbab931aff8648cb13c5ff5e7"}, - {file = "cryptography-41.0.7-pp310-pypy310_pp73-win_amd64.whl", hash = 
"sha256:d5ec85080cce7b0513cfd233914eb8b7bbd0633f1d1703aa28d1dd5a72f678ec"}, - {file = "cryptography-41.0.7-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:7a698cb1dac82c35fcf8fe3417a3aaba97de16a01ac914b89a0889d364d2f6be"}, - {file = "cryptography-41.0.7-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:37a138589b12069efb424220bf78eac59ca68b95696fc622b6ccc1c0a197204a"}, - {file = "cryptography-41.0.7-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:68a2dec79deebc5d26d617bfdf6e8aab065a4f34934b22d3b5010df3ba36612c"}, - {file = "cryptography-41.0.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:09616eeaef406f99046553b8a40fbf8b1e70795a91885ba4c96a70793de5504a"}, - {file = "cryptography-41.0.7-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:48a0476626da912a44cc078f9893f292f0b3e4c739caf289268168d8f4702a39"}, - {file = "cryptography-41.0.7-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c7f3201ec47d5207841402594f1d7950879ef890c0c495052fa62f58283fde1a"}, - {file = "cryptography-41.0.7-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c5ca78485a255e03c32b513f8c2bc39fedb7f5c5f8535545bdc223a03b24f248"}, - {file = "cryptography-41.0.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:d6c391c021ab1f7a82da5d8d0b3cee2f4b2c455ec86c8aebbc84837a631ff309"}, - {file = "cryptography-41.0.7.tar.gz", hash = "sha256:13f93ce9bea8016c253b34afc6bd6a75993e5c40672ed5405a9c832f0d4a00bc"}, -] - -[package.dependencies] -cffi = ">=1.12" + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = 
"cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} [package.extras] docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] -docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] nox = ["nox"] -pep8test = ["black", "check-sdist", "mypy", "ruff"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] @@ -1171,13 +1179,13 @@ test = ["pytest (>=6)"] [[package]] name = "executing" -version = "2.0.1" +version = "2.1.0" description = "Get the currently executing AST node of a frame, and other information" optional = false -python-versions = ">=3.5" +python-versions = ">=3.8" files = [ - {file = "executing-2.0.1-py2.py3-none-any.whl", hash = "sha256:eac49ca94516ccc753f9fb5ce82603156e590b27525a8bc32cce8ae302eb61bc"}, - {file = "executing-2.0.1.tar.gz", hash = "sha256:35afe2ce3affba8ee97f2d69927fa823b08b472b7b994e36a52a964b93d16147"}, + {file = "executing-2.1.0-py2.py3-none-any.whl", hash = 
"sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, + {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, ] [package.extras] @@ -1228,19 +1236,19 @@ test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6 [[package]] name = "filelock" -version = "3.15.4" +version = "3.16.1" description = "A platform independent file lock." optional = false python-versions = ">=3.8" files = [ - {file = "filelock-3.15.4-py3-none-any.whl", hash = "sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7"}, - {file = "filelock-3.15.4.tar.gz", hash = "sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb"}, + {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, + {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", "pytest (>=7.4.3)", "pytest-asyncio (>=0.21)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)", "virtualenv (>=20.26.2)"] -typing = ["typing-extensions (>=4.8)"] +docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4.1)"] +testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] +typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "flake8" @@ -1274,69 +1282,84 @@ dev = ["black", "flake8", "flake8-pyproject", "mypy", "pre-commit", "pytest"] [[package]] name = "greenlet" -version = "3.0.3" +version = "3.1.1" description = "Lightweight in-process concurrent programming" optional = false python-versions = ">=3.7" files = [ - {file = "greenlet-3.0.3-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:9da2bd29ed9e4f15955dd1595ad7bc9320308a3b766ef7f837e23ad4b4aac31a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d353cadd6083fdb056bb46ed07e4340b0869c305c8ca54ef9da3421acbdf6881"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dca1e2f3ca00b84a396bc1bce13dd21f680f035314d2379c4160c98153b2059b"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ed7fb269f15dc662787f4119ec300ad0702fa1b19d2135a37c2c4de6fadfd4a"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd4f49ae60e10adbc94b45c0b5e6a179acc1736cf7a90160b404076ee283cf83"}, - {file = "greenlet-3.0.3-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:73a411ef564e0e097dbe7e866bb2dda0f027e072b04da387282b02c308807405"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7f362975f2d179f9e26928c5b517524e89dd48530a0202570d55ad6ca5d8a56f"}, - {file = "greenlet-3.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:649dde7de1a5eceb258f9cb00bdf50e978c9db1b996964cd80703614c86495eb"}, - {file = "greenlet-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:68834da854554926fbedd38c76e60c4a2e3198c6fbed520b106a8986445caaf9"}, - {file = "greenlet-3.0.3-cp311-cp311-macosx_11_0_universal2.whl", hash = 
"sha256:b1b5667cced97081bf57b8fa1d6bfca67814b0afd38208d52538316e9422fc61"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52f59dd9c96ad2fc0d5724107444f76eb20aaccb675bf825df6435acb7703559"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:afaff6cf5200befd5cec055b07d1c0a5a06c040fe5ad148abcd11ba6ab9b114e"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe754d231288e1e64323cfad462fcee8f0288654c10bdf4f603a39ed923bef33"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2797aa5aedac23af156bbb5a6aa2cd3427ada2972c828244eb7d1b9255846379"}, - {file = "greenlet-3.0.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b7f009caad047246ed379e1c4dbcb8b020f0a390667ea74d2387be2998f58a22"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c5e1536de2aad7bf62e27baf79225d0d64360d4168cf2e6becb91baf1ed074f3"}, - {file = "greenlet-3.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:894393ce10ceac937e56ec00bb71c4c2f8209ad516e96033e4b3b1de270e200d"}, - {file = "greenlet-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:1ea188d4f49089fc6fb283845ab18a2518d279c7cd9da1065d7a84e991748728"}, - {file = "greenlet-3.0.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:70fb482fdf2c707765ab5f0b6655e9cfcf3780d8d87355a063547b41177599be"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4d1ac74f5c0c0524e4a24335350edad7e5f03b9532da7ea4d3c54d527784f2e"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:149e94a2dd82d19838fe4b2259f1b6b9957d5ba1b25640d2380bea9c5df37676"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15d79dd26056573940fcb8c7413d84118086f2ec1a8acdfa854631084393efcc"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b7db1ebff4ba09aaaeae6aa491daeb226c8150fc20e836ad00041bcb11230"}, - {file = "greenlet-3.0.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fcd2469d6a2cf298f198f0487e0a5b1a47a42ca0fa4dfd1b6862c999f018ebbf"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1f672519db1796ca0d8753f9e78ec02355e862d0998193038c7073045899f305"}, - {file = "greenlet-3.0.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2516a9957eed41dd8f1ec0c604f1cdc86758b587d964668b5b196a9db5bfcde6"}, - {file = "greenlet-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:bba5387a6975598857d86de9eac14210a49d554a77eb8261cc68b7d082f78ce2"}, - {file = "greenlet-3.0.3-cp37-cp37m-macosx_11_0_universal2.whl", hash = "sha256:5b51e85cb5ceda94e79d019ed36b35386e8c37d22f07d6a751cb659b180d5274"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:daf3cb43b7cf2ba96d614252ce1684c1bccee6b2183a01328c98d36fcd7d5cb0"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:99bf650dc5d69546e076f413a87481ee1d2d09aaaaaca058c9251b6d8c14783f"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dd6e660effd852586b6a8478a1d244b8dc90ab5b1321751d2ea15deb49ed414"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e3391d1e16e2a5a1507d83e4a8b100f4ee626e8eca43cf2cadb543de69827c4c"}, - {file = "greenlet-3.0.3-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e1f145462f1fa6e4a4ae3c0f782e580ce44d57c8f2c7aae1b6fa88c0b2efdb41"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1a7191e42732df52cb5f39d3527217e7ab73cae2cb3694d241e18f53d84ea9a7"}, - {file = "greenlet-3.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0448abc479fab28b00cb472d278828b3ccca164531daab4e970a0458786055d6"}, - {file = "greenlet-3.0.3-cp37-cp37m-win32.whl", hash = "sha256:b542be2440edc2d48547b5923c408cbe0fc94afb9f18741faa6ae970dbcb9b6d"}, - {file = "greenlet-3.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:01bc7ea167cf943b4c802068e178bbf70ae2e8c080467070d01bfa02f337ee67"}, - {file = "greenlet-3.0.3-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:1996cb9306c8595335bb157d133daf5cf9f693ef413e7673cb07e3e5871379ca"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ddc0f794e6ad661e321caa8d2f0a55ce01213c74722587256fb6566049a8b04"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9db1c18f0eaad2f804728c67d6c610778456e3e1cc4ab4bbd5eeb8e6053c6fc"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7170375bcc99f1a2fbd9c306f5be8764eaf3ac6b5cb968862cad4c7057756506"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b66c9c1e7ccabad3a7d037b2bcb740122a7b17a53734b7d72a344ce39882a1b"}, - {file = "greenlet-3.0.3-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:098d86f528c855ead3479afe84b49242e174ed262456c342d70fc7f972bc13c4"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:81bb9c6d52e8321f09c3d165b2a78c680506d9af285bfccbad9fb7ad5a5da3e5"}, - {file = "greenlet-3.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fd096eb7ffef17c456cfa587523c5f92321ae02427ff955bebe9e3c63bc9f0da"}, - {file = "greenlet-3.0.3-cp38-cp38-win32.whl", hash = "sha256:d46677c85c5ba00a9cb6f7a00b2bfa6f812192d2c9f7d9c4f6a55b60216712f3"}, - {file = "greenlet-3.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:419b386f84949bf0e7c73e6032e3457b82a787c1ab4a0e43732898a761cc9dbf"}, - {file = "greenlet-3.0.3-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:da70d4d51c8b306bb7a031d5cff6cc25ad253affe89b70352af5f1cb68e74b53"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:086152f8fbc5955df88382e8a75984e2bb1c892ad2e3c80a2508954e52295257"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d73a9fe764d77f87f8ec26a0c85144d6a951a6c438dfe50487df5595c6373eac"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7dcbe92cc99f08c8dd11f930de4d99ef756c3591a5377d1d9cd7dd5e896da71"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1551a8195c0d4a68fac7a4325efac0d541b48def35feb49d803674ac32582f61"}, - {file = "greenlet-3.0.3-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:64d7675ad83578e3fc149b617a444fab8efdafc9385471f868eb5ff83e446b8b"}, - {file = "greenlet-3.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b37eef18ea55f2ffd8f00ff8fe7c8d3818abd3e25fb73fae2ca3b672e333a7a6"}, - {file = 
"greenlet-3.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:77457465d89b8263bca14759d7c1684df840b6811b2499838cc5b040a8b5b113"}, - {file = "greenlet-3.0.3-cp39-cp39-win32.whl", hash = "sha256:57e8974f23e47dac22b83436bdcf23080ade568ce77df33159e019d161ce1d1e"}, - {file = "greenlet-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c5ee858cfe08f34712f548c3c363e807e7186f03ad7a5039ebadb29e8c6be067"}, - {file = "greenlet-3.0.3.tar.gz", hash = "sha256:43374442353259554ce33599da8b692d5aa96f8976d567d4badf263371fbe491"}, + {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, + {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, + {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, + {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, + {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3b2813dc3de8c1ee3f924e4d4227999285fd335d1bcc0d2be6dc3f1f6a318ec1"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e347b3bfcf985a05e8c0b7d462ba6f15b1ee1c909e2dcad795e49e91b152c383"}, + {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e8f8c9cb53cdac7ba9793c276acd90168f416b9ce36799b9b885790f8ad6c0a"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62ee94988d6b4722ce0028644418d93a52429e977d742ca2ccbe1c4f4a792511"}, + {file = "greenlet-3.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1776fd7f989fc6b8d8c8cb8da1f6b82c5814957264d1f6cf818d475ec2bf6395"}, + {file = "greenlet-3.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:48ca08c771c268a768087b408658e216133aecd835c0ded47ce955381105ba39"}, + {file = "greenlet-3.1.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:4afe7ea89de619adc868e087b4d2359282058479d7cfb94970adf4b55284574d"}, + {file = 
"greenlet-3.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f406b22b7c9a9b4f8aa9d2ab13d6ae0ac3e85c9a809bd590ad53fed2bf70dc79"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c3a701fe5a9695b238503ce5bbe8218e03c3bcccf7e204e455e7462d770268aa"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2846930c65b47d70b9d178e89c7e1a69c95c1f68ea5aa0a58646b7a96df12441"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99cfaa2110534e2cf3ba31a7abcac9d328d1d9f1b95beede58294a60348fba36"}, + {file = "greenlet-3.1.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1443279c19fca463fc33e65ef2a935a5b09bb90f978beab37729e1c3c6c25fe9"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b7cede291382a78f7bb5f04a529cb18e068dd29e0fb27376074b6d0317bf4dd0"}, + {file = "greenlet-3.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:23f20bb60ae298d7d8656c6ec6db134bca379ecefadb0b19ce6f19d1f232a942"}, + {file = "greenlet-3.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:7124e16b4c55d417577c2077be379514321916d5790fa287c9ed6f23bd2ffd01"}, + {file = "greenlet-3.1.1-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:05175c27cb459dcfc05d026c4232f9de8913ed006d42713cb8a5137bd49375f1"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:935e943ec47c4afab8965954bf49bfa639c05d4ccf9ef6e924188f762145c0ff"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:667a9706c970cb552ede35aee17339a18e8f2a87a51fba2ed39ceeeb1004798a"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b8a678974d1f3aa55f6cc34dc480169d58f2e6d8958895d68845fa4ab566509e"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efc0f674aa41b92da8c49e0346318c6075d734994c3c4e4430b1c3f853e498e4"}, + {file = "greenlet-3.1.1-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0153404a4bb921f0ff1abeb5ce8a5131da56b953eda6e14b88dc6bbc04d2049e"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:275f72decf9932639c1c6dd1013a1bc266438eb32710016a1c742df5da6e60a1"}, + {file = "greenlet-3.1.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c4aab7f6381f38a4b42f269057aee279ab0fc7bf2e929e3d4abfae97b682a12c"}, + {file = "greenlet-3.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:b42703b1cf69f2aa1df7d1030b9d77d3e584a70755674d60e710f0af570f3761"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1695e76146579f8c06c1509c7ce4dfe0706f49c6831a817ac04eebb2fd02011"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7876452af029456b3f3549b696bb36a06db7c90747740c5302f74a9e9fa14b13"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ead44c85f8ab905852d3de8d86f6f8baf77109f9da589cb4fa142bd3b57b475"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8320f64b777d00dd7ccdade271eaf0cad6636343293a25074cc5566160e4de7b"}, + {file = "greenlet-3.1.1-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:6510bf84a6b643dabba74d3049ead221257603a253d0a9873f55f6a59a65f822"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:04b013dc07c96f83134b1e99888e7a79979f1a247e2a9f59697fa14b5862ed01"}, + {file = "greenlet-3.1.1-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:411f015496fec93c1c8cd4e5238da364e1da7a124bcb293f085bf2860c32c6f6"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:47da355d8687fd65240c364c90a31569a133b7b60de111c255ef5b606f2ae291"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:98884ecf2ffb7d7fe6bd517e8eb99d31ff7855a840fa6d0d63cd07c037f6a981"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1d4aeb8891338e60d1ab6127af1fe45def5259def8094b9c7e34690c8858803"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db32b5348615a04b82240cc67983cb315309e88d444a288934ee6ceaebcad6cc"}, + {file = "greenlet-3.1.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dcc62f31eae24de7f8dce72134c8651c58000d3b1868e01392baea7c32c247de"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1d3755bcb2e02de341c55b4fca7a745a24a9e7212ac953f6b3a48d117d7257aa"}, + {file = "greenlet-3.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b8da394b34370874b4572676f36acabac172602abf054cbc4ac910219f3340af"}, + {file = "greenlet-3.1.1-cp37-cp37m-win32.whl", hash = "sha256:a0dfc6c143b519113354e780a50381508139b07d2177cb6ad6a08278ec655798"}, + {file = "greenlet-3.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:54558ea205654b50c438029505def3834e80f0869a70fb15b871c29b4575ddef"}, + {file = "greenlet-3.1.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:346bed03fe47414091be4ad44786d1bd8bef0c3fcad6ed3dee074a032ab408a9"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dfc59d69fc48664bc693842bd57acfdd490acafda1ab52c7836e3fc75c90a111"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d21e10da6ec19b457b82636209cbe2331ff4306b54d06fa04b7c138ba18c8a81"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37b9de5a96111fc15418819ab4c4432e4f3c2ede61e660b1e33971eba26ef9ba"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef9ea3f137e5711f0dbe5f9263e8c009b7069d8a1acea822bd5e9dae0ae49c8"}, + {file = "greenlet-3.1.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85f3ff71e2e60bd4b4932a043fbbe0f499e263c628390b285cb599154a3b03b1"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95ffcf719966dd7c453f908e208e14cde192e09fde6c7186c8f1896ef778d8cd"}, + {file = "greenlet-3.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:03a088b9de532cbfe2ba2034b2b85e82df37874681e8c470d6fb2f8c04d7e4b7"}, + {file = "greenlet-3.1.1-cp38-cp38-win32.whl", hash = "sha256:8b8b36671f10ba80e159378df9c4f15c14098c4fd73a36b9ad715f057272fbef"}, + {file = "greenlet-3.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:7017b2be767b9d43cc31416aba48aab0d2309ee31b4dbf10a1d38fb7972bdf9d"}, + {file = "greenlet-3.1.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:396979749bd95f018296af156201d6211240e7a23090f50a8d5d18c370084dc3"}, + {file = 
"greenlet-3.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca9d0ff5ad43e785350894d97e13633a66e2b50000e8a183a50a88d834752d42"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f6ff3b14f2df4c41660a7dec01045a045653998784bf8cfcb5a525bdffffbc8f"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94ebba31df2aa506d7b14866fed00ac141a867e63143fe5bca82a8e503b36437"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73aaad12ac0ff500f62cebed98d8789198ea0e6f233421059fa68a5aa7220145"}, + {file = "greenlet-3.1.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:63e4844797b975b9af3a3fb8f7866ff08775f5426925e1e0bbcfe7932059a12c"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7939aa3ca7d2a1593596e7ac6d59391ff30281ef280d8632fa03d81f7c5f955e"}, + {file = "greenlet-3.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d0028e725ee18175c6e422797c407874da24381ce0690d6b9396c204c7f7276e"}, + {file = "greenlet-3.1.1-cp39-cp39-win32.whl", hash = "sha256:5e06afd14cbaf9e00899fae69b24a32f2196c19de08fcb9f4779dd4f004e5e7c"}, + {file = "greenlet-3.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:3319aa75e0e0639bc15ff54ca327e8dc7a6fe404003496e3c6925cd3142e0e22"}, + {file = "greenlet-3.1.1.tar.gz", hash = "sha256:4ce3ac6cdb6adf7946475d7ef31777c26d94bccc377e070a7986bd2d5c515467"}, ] [package.extras] @@ -1611,13 +1634,13 @@ pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_ve [[package]] name = "identify" -version = "2.6.0" +version = "2.6.1" description = "File identification library for Python" optional = false python-versions = ">=3.8" files = [ - {file = "identify-2.6.0-py2.py3-none-any.whl", hash = "sha256:e79ae4406387a9d300332b5fd366d8994f1525e8414984e1a59e058b2eda2dd0"}, - {file = "identify-2.6.0.tar.gz", hash = "sha256:cb171c685bdc31bcc4c1734698736a7d5b6c8bf2e0c15117f4d469c8640ae5cf"}, + {file = "identify-2.6.1-py2.py3-none-any.whl", hash = "sha256:53863bcac7caf8d2ed85bd20312ea5dcfc22226800f6d6881f232d861db5a8f0"}, + {file = "identify-2.6.1.tar.gz", hash = "sha256:91478c5fb7c3aac5ff7bf9b4344f803843dc586832d5f110d672b19aa1984c98"}, ] [package.extras] @@ -1625,15 +1648,18 @@ license = ["ukkonen"] [[package]] name = "idna" -version = "3.7" +version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" optional = false -python-versions = ">=3.5" +python-versions = ">=3.6" files = [ - {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, - {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, ] +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + [[package]] name = "idutils" version = "1.2.1" @@ -1654,22 +1680,26 @@ tests = ["pytest-black (>=0.3.0,<0.3.10)", "pytest-cache (>=1.0)", "pytest-inven [[package]] name = "importlib-metadata" -version = "8.2.0" +version = "8.5.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "importlib_metadata-8.2.0-py3-none-any.whl", 
hash = "sha256:11901fa0c2f97919b288679932bb64febaeacf289d18ac84dd68cb2e74213369"}, - {file = "importlib_metadata-8.2.0.tar.gz", hash = "sha256:72e8d4399996132204f9a16dcc751af254a48f8d1b20b9ff0f98d4a8f901e73d"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [package.dependencies] -zipp = ">=0.5" +zipp = ">=3.20" [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] perf = ["ipython"] -test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"] +test = ["flufl.flake8", "importlib-resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"] +type = ["pytest-mypy"] [[package]] name = "iniconfig" @@ -2374,19 +2404,19 @@ ptyprocess = ">=0.5" [[package]] name = "platformdirs" -version = "4.2.2" +version = "4.3.6" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" files = [ - {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, - {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, + {file = "platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb"}, + {file = "platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907"}, ] [package.extras] -docs = ["furo (>=2023.9.10)", "proselint (>=0.13)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.25.2)"] -test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)"] -type = ["mypy (>=1.8)"] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.2)", "pytest-cov (>=5)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.11.2)"] [[package]] name = "pluggy" @@ -2405,13 +2435,13 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "port-for" -version = "0.7.2" +version = "0.7.3" description = "Utility that helps with local TCP ports management. It can find an unused TCP localhost port and remember the association." 
optional = false python-versions = ">=3.8" files = [ - {file = "port-for-0.7.2.tar.gz", hash = "sha256:074f29335130578aa42fef3726985e57d01c15189e509633a8a1b0b7f9226349"}, - {file = "port_for-0.7.2-py3-none-any.whl", hash = "sha256:16b279ab4f210bad33515c45bd9af0c6e048ab24c3b6bbd9cfc7e451782617df"}, + {file = "port_for-0.7.3-py3-none-any.whl", hash = "sha256:786fa1171cee23093a475d65228b4a9877d249827ceb7cd2362cb7b80d0c69d4"}, + {file = "port_for-0.7.3.tar.gz", hash = "sha256:2d597e5854a1b323b17eba8ae0630784c779857abde5e22444c88d233a60f953"}, ] [[package]] @@ -2434,13 +2464,13 @@ virtualenv = ">=20.10.0" [[package]] name = "prompt-toolkit" -version = "3.0.47" +version = "3.0.48" description = "Library for building powerful interactive command lines in Python" optional = false python-versions = ">=3.7.0" files = [ - {file = "prompt_toolkit-3.0.47-py3-none-any.whl", hash = "sha256:0d7bfa67001d5e39d02c224b663abc33687405033a8c422d0d675a5a13361d10"}, - {file = "prompt_toolkit-3.0.47.tar.gz", hash = "sha256:1e1b29cb58080b1e69f207c893a1a7bf16d127a5c30c9d17a25a5d77792e5360"}, + {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, + {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"}, ] [package.dependencies] @@ -2477,26 +2507,26 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "psycopg" -version = "3.2.1" +version = "3.2.3" description = "PostgreSQL database adapter for Python" optional = false python-versions = ">=3.8" files = [ - {file = "psycopg-3.2.1-py3-none-any.whl", hash = "sha256:ece385fb413a37db332f97c49208b36cf030ff02b199d7635ed2fbd378724175"}, - {file = "psycopg-3.2.1.tar.gz", hash = "sha256:dc8da6dc8729dacacda3cc2f17d2c9397a70a66cf0d2b69c91065d60d5f00cb7"}, + {file = "psycopg-3.2.3-py3-none-any.whl", hash = "sha256:644d3973fe26908c73d4be746074f6e5224b03c1101d302d9a53bf565ad64907"}, + {file = "psycopg-3.2.3.tar.gz", hash = "sha256:a5764f67c27bec8bfac85764d23c534af2c27b893550377e37ce59c12aac47a2"}, ] [package.dependencies] -typing-extensions = ">=4.4" +typing-extensions = {version = ">=4.6", markers = "python_version < \"3.13\""} tzdata = {version = "*", markers = "sys_platform == \"win32\""} [package.extras] -binary = ["psycopg-binary (==3.2.1)"] -c = ["psycopg-c (==3.2.1)"] -dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "mypy (>=1.6)", "types-setuptools (>=57.4)", "wheel (>=0.37)"] +binary = ["psycopg-binary (==3.2.3)"] +c = ["psycopg-c (==3.2.3)"] +dev = ["ast-comments (>=1.1.2)", "black (>=24.1.0)", "codespell (>=2.2)", "dnspython (>=2.1)", "flake8 (>=4.0)", "mypy (>=1.11)", "types-setuptools (>=57.4)", "wheel (>=0.37)"] docs = ["Sphinx (>=5.0)", "furo (==2022.6.21)", "sphinx-autobuild (>=2021.3.14)", "sphinx-autodoc-typehints (>=1.12)"] pool = ["psycopg-pool"] -test = ["anyio (>=4.0)", "mypy (>=1.6)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"] +test = ["anyio (>=4.0)", "mypy (>=1.11)", "pproxy (>=2.7)", "pytest (>=6.2.5)", "pytest-cov (>=3.0)", "pytest-randomly (>=3.5)"] [[package]] name = "psycopg2" @@ -2545,26 +2575,15 @@ files = [ [package.extras] tests = ["pytest"] -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - 
{file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] - [[package]] name = "pyasn1" -version = "0.6.0" +version = "0.6.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" optional = true python-versions = ">=3.8" files = [ - {file = "pyasn1-0.6.0-py2.py3-none-any.whl", hash = "sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473"}, - {file = "pyasn1-0.6.0.tar.gz", hash = "sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c"}, + {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, + {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, ] [[package]] @@ -2591,54 +2610,54 @@ files = [ [[package]] name = "pydantic" -version = "1.10.17" +version = "1.10.18" description = "Data validation and settings management using python type hints" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.17-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0fa51175313cc30097660b10eec8ca55ed08bfa07acbfe02f7a42f6c242e9a4b"}, - {file = "pydantic-1.10.17-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c7e8988bb16988890c985bd2093df9dd731bfb9d5e0860db054c23034fab8f7a"}, - {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:371dcf1831f87c9e217e2b6a0c66842879a14873114ebb9d0861ab22e3b5bb1e"}, - {file = "pydantic-1.10.17-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4866a1579c0c3ca2c40575398a24d805d4db6cb353ee74df75ddeee3c657f9a7"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:543da3c6914795b37785703ffc74ba4d660418620cc273490d42c53949eeeca6"}, - {file = "pydantic-1.10.17-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7623b59876f49e61c2e283551cc3647616d2fbdc0b4d36d3d638aae8547ea681"}, - {file = "pydantic-1.10.17-cp310-cp310-win_amd64.whl", hash = "sha256:409b2b36d7d7d19cd8310b97a4ce6b1755ef8bd45b9a2ec5ec2b124db0a0d8f3"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fa43f362b46741df8f201bf3e7dff3569fa92069bcc7b4a740dea3602e27ab7a"}, - {file = "pydantic-1.10.17-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2a72d2a5ff86a3075ed81ca031eac86923d44bc5d42e719d585a8eb547bf0c9b"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4ad32aed3bf5eea5ca5decc3d1bbc3d0ec5d4fbcd72a03cdad849458decbc63"}, - {file = "pydantic-1.10.17-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aeb4e741782e236ee7dc1fb11ad94dc56aabaf02d21df0e79e0c21fe07c95741"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d2f89a719411cb234105735a520b7c077158a81e0fe1cb05a79c01fc5eb59d3c"}, - {file = "pydantic-1.10.17-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db3b48d9283d80a314f7a682f7acae8422386de659fffaba454b77a083c3937d"}, - {file = "pydantic-1.10.17-cp311-cp311-win_amd64.whl", hash = "sha256:9c803a5113cfab7bbb912f75faa4fc1e4acff43e452c82560349fff64f852e1b"}, - {file = "pydantic-1.10.17-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:820ae12a390c9cbb26bb44913c87fa2ff431a029a785642c1ff11fed0a095fcb"}, - 
{file = "pydantic-1.10.17-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c1e51d1af306641b7d1574d6d3307eaa10a4991542ca324f0feb134fee259815"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e53fb834aae96e7b0dadd6e92c66e7dd9cdf08965340ed04c16813102a47fab"}, - {file = "pydantic-1.10.17-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e2495309b1266e81d259a570dd199916ff34f7f51f1b549a0d37a6d9b17b4dc"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:098ad8de840c92ea586bf8efd9e2e90c6339d33ab5c1cfbb85be66e4ecf8213f"}, - {file = "pydantic-1.10.17-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:525bbef620dac93c430d5d6bdbc91bdb5521698d434adf4434a7ef6ffd5c4b7f"}, - {file = "pydantic-1.10.17-cp312-cp312-win_amd64.whl", hash = "sha256:6654028d1144df451e1da69a670083c27117d493f16cf83da81e1e50edce72ad"}, - {file = "pydantic-1.10.17-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c87cedb4680d1614f1d59d13fea353faf3afd41ba5c906a266f3f2e8c245d655"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11289fa895bcbc8f18704efa1d8020bb9a86314da435348f59745473eb042e6b"}, - {file = "pydantic-1.10.17-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:94833612d6fd18b57c359a127cbfd932d9150c1b72fea7c86ab58c2a77edd7c7"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:d4ecb515fa7cb0e46e163ecd9d52f9147ba57bc3633dca0e586cdb7a232db9e3"}, - {file = "pydantic-1.10.17-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7017971ffa7fd7808146880aa41b266e06c1e6e12261768a28b8b41ba55c8076"}, - {file = "pydantic-1.10.17-cp37-cp37m-win_amd64.whl", hash = "sha256:e840e6b2026920fc3f250ea8ebfdedf6ea7a25b77bf04c6576178e681942ae0f"}, - {file = "pydantic-1.10.17-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bfbb18b616abc4df70591b8c1ff1b3eabd234ddcddb86b7cac82657ab9017e33"}, - {file = "pydantic-1.10.17-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb249096d873593e014535ab07145498957091aa6ae92759a32d40cb9998e2e"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8c209af63ccd7b22fba94b9024e8b7fd07feffee0001efae50dd99316b27768"}, - {file = "pydantic-1.10.17-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d4b40c9e13a0b61583e5599e7950490c700297b4a375b55b2b592774332798b7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c31d281c7485223caf6474fc2b7cf21456289dbaa31401844069b77160cab9c7"}, - {file = "pydantic-1.10.17-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ae5184e99a060a5c80010a2d53c99aee76a3b0ad683d493e5f0620b5d86eeb75"}, - {file = "pydantic-1.10.17-cp38-cp38-win_amd64.whl", hash = "sha256:ad1e33dc6b9787a6f0f3fd132859aa75626528b49cc1f9e429cdacb2608ad5f0"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17c0ee7192e54a10943f245dc79e36d9fe282418ea05b886e1c666063a7b54"}, - {file = "pydantic-1.10.17-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cafb9c938f61d1b182dfc7d44a7021326547b7b9cf695db5b68ec7b590214773"}, - {file = "pydantic-1.10.17-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95ef534e3c22e5abbdbdd6f66b6ea9dac3ca3e34c5c632894f8625d13d084cbe"}, - {file = 
"pydantic-1.10.17-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d96b8799ae3d782df7ec9615cb59fc32c32e1ed6afa1b231b0595f6516e8ab"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ab2f976336808fd5d539fdc26eb51f9aafc1f4b638e212ef6b6f05e753c8011d"}, - {file = "pydantic-1.10.17-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8ad363330557beac73159acfbeed220d5f1bfcd6b930302a987a375e02f74fd"}, - {file = "pydantic-1.10.17-cp39-cp39-win_amd64.whl", hash = "sha256:48db882e48575ce4b39659558b2f9f37c25b8d348e37a2b4e32971dd5a7d6227"}, - {file = "pydantic-1.10.17-py3-none-any.whl", hash = "sha256:e41b5b973e5c64f674b3b4720286ded184dcc26a691dd55f34391c62c6934688"}, - {file = "pydantic-1.10.17.tar.gz", hash = "sha256:f434160fb14b353caf634149baaf847206406471ba70e64657c1e8330277a991"}, + {file = "pydantic-1.10.18-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e405ffcc1254d76bb0e760db101ee8916b620893e6edfbfee563b3c6f7a67c02"}, + {file = "pydantic-1.10.18-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e306e280ebebc65040034bff1a0a81fd86b2f4f05daac0131f29541cafd80b80"}, + {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11d9d9b87b50338b1b7de4ebf34fd29fdb0d219dc07ade29effc74d3d2609c62"}, + {file = "pydantic-1.10.18-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b661ce52c7b5e5f600c0c3c5839e71918346af2ef20062705ae76b5c16914cab"}, + {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c20f682defc9ef81cd7eaa485879ab29a86a0ba58acf669a78ed868e72bb89e0"}, + {file = "pydantic-1.10.18-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c5ae6b7c8483b1e0bf59e5f1843e4fd8fd405e11df7de217ee65b98eb5462861"}, + {file = "pydantic-1.10.18-cp310-cp310-win_amd64.whl", hash = "sha256:74fe19dda960b193b0eb82c1f4d2c8e5e26918d9cda858cbf3f41dd28549cb70"}, + {file = "pydantic-1.10.18-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:72fa46abace0a7743cc697dbb830a41ee84c9db8456e8d77a46d79b537efd7ec"}, + {file = "pydantic-1.10.18-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ef0fe7ad7cbdb5f372463d42e6ed4ca9c443a52ce544472d8842a0576d830da5"}, + {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a00e63104346145389b8e8f500bc6a241e729feaf0559b88b8aa513dd2065481"}, + {file = "pydantic-1.10.18-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae6fa2008e1443c46b7b3a5eb03800121868d5ab6bc7cda20b5df3e133cde8b3"}, + {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:9f463abafdc92635da4b38807f5b9972276be7c8c5121989768549fceb8d2588"}, + {file = "pydantic-1.10.18-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3445426da503c7e40baccefb2b2989a0c5ce6b163679dd75f55493b460f05a8f"}, + {file = "pydantic-1.10.18-cp311-cp311-win_amd64.whl", hash = "sha256:467a14ee2183bc9c902579bb2f04c3d3dac00eff52e252850509a562255b2a33"}, + {file = "pydantic-1.10.18-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:efbc8a7f9cb5fe26122acba1852d8dcd1e125e723727c59dcd244da7bdaa54f2"}, + {file = "pydantic-1.10.18-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24a4a159d0f7a8e26bf6463b0d3d60871d6a52eac5bb6a07a7df85c806f4c048"}, + {file = "pydantic-1.10.18-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b74be007703547dc52e3c37344d130a7bfacca7df112a9e5ceeb840a9ce195c7"}, + 
{file = "pydantic-1.10.18-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fcb20d4cb355195c75000a49bb4a31d75e4295200df620f454bbc6bdf60ca890"}, + {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:46f379b8cb8a3585e3f61bf9ae7d606c70d133943f339d38b76e041ec234953f"}, + {file = "pydantic-1.10.18-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cbfbca662ed3729204090c4d09ee4beeecc1a7ecba5a159a94b5a4eb24e3759a"}, + {file = "pydantic-1.10.18-cp312-cp312-win_amd64.whl", hash = "sha256:c6d0a9f9eccaf7f438671a64acf654ef0d045466e63f9f68a579e2383b63f357"}, + {file = "pydantic-1.10.18-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3d5492dbf953d7d849751917e3b2433fb26010d977aa7a0765c37425a4026ff1"}, + {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe734914977eed33033b70bfc097e1baaffb589517863955430bf2e0846ac30f"}, + {file = "pydantic-1.10.18-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:15fdbe568beaca9aacfccd5ceadfb5f1a235087a127e8af5e48df9d8a45ae85c"}, + {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c3e742f62198c9eb9201781fbebe64533a3bbf6a76a91b8d438d62b813079dbc"}, + {file = "pydantic-1.10.18-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:19a3bd00b9dafc2cd7250d94d5b578edf7a0bd7daf102617153ff9a8fa37871c"}, + {file = "pydantic-1.10.18-cp37-cp37m-win_amd64.whl", hash = "sha256:2ce3fcf75b2bae99aa31bd4968de0474ebe8c8258a0110903478bd83dfee4e3b"}, + {file = "pydantic-1.10.18-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:335a32d72c51a313b33fa3a9b0fe283503272ef6467910338e123f90925f0f03"}, + {file = "pydantic-1.10.18-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:34a3613c7edb8c6fa578e58e9abe3c0f5e7430e0fc34a65a415a1683b9c32d9a"}, + {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9ee4e6ca1d9616797fa2e9c0bfb8815912c7d67aca96f77428e316741082a1b"}, + {file = "pydantic-1.10.18-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23e8ec1ce4e57b4f441fc91e3c12adba023fedd06868445a5b5f1d48f0ab3682"}, + {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:44ae8a3e35a54d2e8fa88ed65e1b08967a9ef8c320819a969bfa09ce5528fafe"}, + {file = "pydantic-1.10.18-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5389eb3b48a72da28c6e061a247ab224381435256eb541e175798483368fdd3"}, + {file = "pydantic-1.10.18-cp38-cp38-win_amd64.whl", hash = "sha256:069b9c9fc645474d5ea3653788b544a9e0ccd3dca3ad8c900c4c6eac844b4620"}, + {file = "pydantic-1.10.18-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:80b982d42515632eb51f60fa1d217dfe0729f008e81a82d1544cc392e0a50ddf"}, + {file = "pydantic-1.10.18-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:aad8771ec8dbf9139b01b56f66386537c6fe4e76c8f7a47c10261b69ad25c2c9"}, + {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:941a2eb0a1509bd7f31e355912eb33b698eb0051730b2eaf9e70e2e1589cae1d"}, + {file = "pydantic-1.10.18-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65f7361a09b07915a98efd17fdec23103307a54db2000bb92095457ca758d485"}, + {file = "pydantic-1.10.18-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6951f3f47cb5ca4da536ab161ac0163cab31417d20c54c6de5ddcab8bc813c3f"}, + {file = 
"pydantic-1.10.18-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a4c5eec138a9b52c67f664c7d51d4c7234c5ad65dd8aacd919fb47445a62c86"}, + {file = "pydantic-1.10.18-cp39-cp39-win_amd64.whl", hash = "sha256:49e26c51ca854286bffc22b69787a8d4063a62bf7d83dc21d44d2ff426108518"}, + {file = "pydantic-1.10.18-py3-none-any.whl", hash = "sha256:06a189b81ffc52746ec9c8c007f16e5167c8b0a696e1a726369327e3db7b2a82"}, + {file = "pydantic-1.10.18.tar.gz", hash = "sha256:baebdff1907d1d96a139c25136a9bb7d17e118f133a76a2ef3b845e831e3403a"}, ] [package.dependencies] @@ -2686,15 +2705,18 @@ files = [ [[package]] name = "pyreadline3" -version = "3.4.1" +version = "3.5.4" description = "A python implementation of GNU readline." optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, - {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, + {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, + {file = "pyreadline3-3.5.4.tar.gz", hash = "sha256:8d57d53039a1c75adba8e50dd3d992b28143480816187ea5efbd5c78e6c885b7"}, ] +[package.extras] +dev = ["build", "flake8", "mypy", "pytest", "twine"] + [[package]] name = "pysam" version = "0.22.1" @@ -2733,24 +2755,23 @@ files = [ [[package]] name = "pytest" -version = "7.0.1" +version = "7.2.2" description = "pytest: simple powerful testing with Python" optional = false -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "pytest-7.0.1-py3-none-any.whl", hash = "sha256:9ce3ff477af913ecf6321fe337b93a2c0dcf2a0a1439c43f5452112c1e4280db"}, - {file = "pytest-7.0.1.tar.gz", hash = "sha256:e30905a0c131d3d94b89624a1cc5afec3e0ba2fbdb151867d8e0ebd49850f171"}, + {file = "pytest-7.2.2-py3-none-any.whl", hash = "sha256:130328f552dcfac0b1cec75c12e3f005619dc5f874f0a06e8ff7263f0ee6225e"}, + {file = "pytest-7.2.2.tar.gz", hash = "sha256:c99ab0c73aceb050f68929bc93af19ab6db0558791c6a0715723abe9d0ade9d4"}, ] [package.dependencies] -atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} attrs = ">=19.2.0" colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} iniconfig = "*" packaging = "*" pluggy = ">=0.12,<2.0" -py = ">=1.8.2" -tomli = ">=1.0.0" +tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] @@ -2868,27 +2889,24 @@ files = [ [[package]] name = "python-multipart" -version = "0.0.9" +version = "0.0.12" description = "A streaming multipart parser for Python" optional = true python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, - {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, + {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"}, + {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"}, ] -[package.extras] -dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke 
(==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] - [[package]] name = "pytz" -version = "2024.1" +version = "2024.2" description = "World timezone definitions, modern and historical" optional = false python-versions = "*" files = [ - {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, - {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, + {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, + {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, ] [[package]] @@ -2989,13 +3007,13 @@ rpds-py = ">=0.7.0" [[package]] name = "requests" -version = "2.31.0" +version = "2.32.3" description = "Python HTTP for Humans." optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, - {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, + {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, + {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, ] [package.dependencies] @@ -3172,125 +3190,141 @@ crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"] [[package]] name = "setuptools" -version = "72.1.0" +version = "75.1.0" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "setuptools-72.1.0-py3-none-any.whl", hash = "sha256:5a03e1860cf56bb6ef48ce186b0e557fdba433237481a9a625176c2831be15d1"}, - {file = "setuptools-72.1.0.tar.gz", hash = "sha256:8d243eff56d095e5817f796ede6ae32941278f542e0f941867cc05ae52b162ec"}, + {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, + {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, ] [package.extras] -core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.text (>=3.7)", "more-itertools (>=8.8)", "ordered-set (>=3.1.1)", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] -doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"] -test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "mypy (==1.11.*)", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-home (>=0.5)", "pytest-mypy", "pytest-perf", "pytest-ruff (<0.4)", "pytest-ruff (>=0.2.1)", "pytest-ruff (>=0.3.2)", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", 
"tomli", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.5.2)"] +core = ["importlib-metadata (>=6)", "importlib-resources (>=5.10.2)", "jaraco.collections", "jaraco.functools", "jaraco.text (>=3.7)", "more-itertools", "more-itertools (>=8.8)", "packaging", "packaging (>=24)", "platformdirs (>=2.6.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test", "packaging (>=23.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] +type = ["importlib-metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.11.*)", "pytest-mypy"] [[package]] name = "simplejson" -version = "3.19.2" +version = "3.19.3" description = "Simple, fast, extensible JSON encoder/decoder for Python" optional = true -python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" -files = [ - {file = "simplejson-3.19.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a"}, - {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835"}, - {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402"}, - {file = "simplejson-3.19.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48"}, - {file = 
"simplejson-3.19.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5"}, - {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3"}, - {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672"}, - {file = "simplejson-3.19.2-cp310-cp310-win32.whl", hash = "sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7"}, - {file = "simplejson-3.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4"}, - {file = "simplejson-3.19.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf"}, - {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3"}, - {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f"}, - {file = "simplejson-3.19.2-cp311-cp311-win32.whl", hash = "sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b"}, - {file = "simplejson-3.19.2-cp311-cp311-win_amd64.whl", hash = 
"sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378"}, - {file = "simplejson-3.19.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a"}, - {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b"}, - {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb"}, - {file = "simplejson-3.19.2-cp312-cp312-win32.whl", hash = "sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917"}, - {file = "simplejson-3.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f"}, - {file = "simplejson-3.19.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b"}, - {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c"}, - {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28"}, - {file = 
"simplejson-3.19.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc"}, - {file = "simplejson-3.19.2-cp36-cp36m-win32.whl", hash = "sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50"}, - {file = "simplejson-3.19.2-cp36-cp36m-win_amd64.whl", hash = "sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f"}, - {file = "simplejson-3.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80"}, - {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2"}, - {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b"}, - {file = "simplejson-3.19.2-cp37-cp37m-win32.whl", hash = "sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693"}, - {file = "simplejson-3.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087"}, - {file = "simplejson-3.19.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0"}, - {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a"}, - 
{file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded"}, - {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f"}, - {file = "simplejson-3.19.2-cp38-cp38-win32.whl", hash = "sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637"}, - {file = "simplejson-3.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358"}, - {file = "simplejson-3.19.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c"}, - {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd"}, - {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff"}, - {file = "simplejson-3.19.2-cp39-cp39-win32.whl", hash = "sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23"}, - {file = "simplejson-3.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4"}, - {file = "simplejson-3.19.2-py3-none-any.whl", hash = "sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb"}, - {file = "simplejson-3.19.2.tar.gz", hash = "sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c"}, +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.5" +files = [ + {file = "simplejson-3.19.3-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:f39caec26007a2d0efab6b8b1d74873ede9351962707afab622cc2285dd26ed0"}, + {file = "simplejson-3.19.3-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:83c87706265ae3028e8460d08b05f30254c569772e859e5ba61fe8af2c883468"}, + {file = "simplejson-3.19.3-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0b5ddd2c7d1d3f4d23224bc8a04bbf1430ae9a8149c05b90f8fc610f7f857a23"}, + {file = 
"simplejson-3.19.3-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:ad0e0b1ce9bd3edb5cf64b5b5b76eacbfdac8c5367153aeeec8a8b1407f68342"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:93be280fc69a952c76e261036312c20b910e7fa9e234f1d89bdfe3fa34f8a023"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:6d43e24b88c80f997081503f693be832fc90854f278df277dd54f8a4c847ab61"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:2876027ebdd599d730d36464debe84619b0368e9a642ca6e7c601be55aed439e"}, + {file = "simplejson-3.19.3-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:0766ca6222b410e08e0053a0dda3606cafb3973d5d00538307f631bb59743396"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:50d8b742d74c449c4dcac570d08ce0f21f6a149d2d9cf7652dbf2ba9a1bc729a"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dd011fc3c1d88b779645495fdb8189fb318a26981eebcce14109460e062f209b"}, + {file = "simplejson-3.19.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:637c4d4b81825c1f4d651e56210bd35b5604034b192b02d2d8f17f7ce8c18f42"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f56eb03bc9e432bb81adc8ecff2486d39feb371abb442964ffb44f6db23b332"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ef59a53be400c1fad2c914b8d74c9d42384fed5174f9321dd021b7017fd40270"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72e8abbc86fcac83629a030888b45fed3a404d54161118be52cb491cd6975d3e"}, + {file = "simplejson-3.19.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8efb03ca77bd7725dfacc9254df00d73e6f43013cf39bd37ef1a8ed0ebb5165"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:add8850db04b98507a8b62d248a326ecc8561e6d24336d1ca5c605bbfaab4cad"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:fc3dc9fb413fc34c396f52f4c87de18d0bd5023804afa8ab5cc224deeb6a9900"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dfa420bb9225dd33b6efdabde7c6a671b51150b9b1d9c4e5cd74d3b420b3fe1"}, + {file = "simplejson-3.19.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7b5c472099b39b274dcde27f1113db8d818c9aa3ba8f78cbb8ad04a4c1ac2118"}, + {file = "simplejson-3.19.3-cp310-cp310-win32.whl", hash = "sha256:817abad79241ed4a507b3caf4d3f2be5079f39d35d4c550a061988986bffd2ec"}, + {file = "simplejson-3.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:dd5b9b1783e14803e362a558680d88939e830db2466f3fa22df5c9319f8eea94"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e88abff510dcff903a18d11c2a75f9964e768d99c8d147839913886144b2065e"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:934a50a614fb831614db5dbfba35127ee277624dda4d15895c957d2f5d48610c"}, + {file = "simplejson-3.19.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:212fce86a22188b0c7f53533b0f693ea9605c1a0f02c84c475a30616f55a744d"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d9e8f836688a8fabe6a6b41b334aa550a6823f7b4ac3d3712fc0ad8655be9a8"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:23228037dc5d41c36666384062904d74409a62f52283d9858fa12f4c22cffad1"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0791f64fed7d4abad639491f8a6b1ba56d3c604eb94b50f8697359b92d983f36"}, + {file = "simplejson-3.19.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4f614581b61a26fbbba232a1391f6cee82bc26f2abbb6a0b44a9bba25c56a1c"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1df0aaf1cb787fdf34484ed4a1f0c545efd8811f6028623290fef1a53694e597"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:951095be8d4451a7182403354c22ec2de3e513e0cc40408b689af08d02611588"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:2a954b30810988feeabde843e3263bf187697e0eb5037396276db3612434049b"}, + {file = "simplejson-3.19.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c40df31a75de98db2cdfead6074d4449cd009e79f54c1ebe5e5f1f153c68ad20"}, + {file = "simplejson-3.19.3-cp311-cp311-win32.whl", hash = "sha256:7e2a098c21ad8924076a12b6c178965d88a0ad75d1de67e1afa0a66878f277a5"}, + {file = "simplejson-3.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:c9bedebdc5fdad48af8783022bae307746d54006b783007d1d3c38e10872a2c6"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:66a0399e21c2112acacfebf3d832ebe2884f823b1c7e6d1363f2944f1db31a99"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6ef9383c5e05f445be60f1735c1816163c874c0b1ede8bb4390aff2ced34f333"}, + {file = "simplejson-3.19.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:42e5acf80d4d971238d4df97811286a044d720693092b20a56d5e56b7dcc5d09"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0b0efc7279d768db7c74d3d07f0b5c81280d16ae3fb14e9081dc903e8360771"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0552eb06e7234da892e1d02365cd2b7b2b1f8233aa5aabdb2981587b7cc92ea0"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf6a3b9a7d7191471b464fe38f684df10eb491ec9ea454003edb45a011ab187"}, + {file = "simplejson-3.19.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7017329ca8d4dca94ad5e59f496e5fc77630aecfc39df381ffc1d37fb6b25832"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:67a20641afebf4cfbcff50061f07daad1eace6e7b31d7622b6fa2c40d43900ba"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:dd6a7dabcc4c32daf601bc45e01b79175dde4b52548becea4f9545b0a4428169"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:08f9b443a94e72dd02c87098c96886d35790e79e46b24e67accafbf13b73d43b"}, + {file = "simplejson-3.19.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa97278ae6614346b5ca41a45a911f37a3261b57dbe4a00602048652c862c28b"}, + {file = "simplejson-3.19.3-cp312-cp312-win32.whl", hash = "sha256:ef28c3b328d29b5e2756903aed888960bc5df39b4c2eab157ae212f70ed5bf74"}, + {file = "simplejson-3.19.3-cp312-cp312-win_amd64.whl", hash = "sha256:1e662336db50ad665777e6548b5076329a94a0c3d4a0472971c588b3ef27de3a"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_10_13_universal2.whl", hash = 
"sha256:0959e6cb62e3994b5a40e31047ff97ef5c4138875fae31659bead691bed55896"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7a7bfad839c624e139a4863007233a3f194e7c51551081f9789cba52e4da5167"}, + {file = "simplejson-3.19.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:afab2f7f2486a866ff04d6d905e9386ca6a231379181a3838abce1f32fbdcc37"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d00313681015ac498e1736b304446ee6d1c72c5b287cd196996dad84369998f7"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d936ae682d5b878af9d9eb4d8bb1fdd5e41275c8eb59ceddb0aeed857bb264a2"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01c6657485393f2e9b8177c77a7634f13ebe70d5e6de150aae1677d91516ce6b"}, + {file = "simplejson-3.19.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a6a750d3c7461b1c47cfc6bba8d9e57a455e7c5f80057d2a82f738040dd1129"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ea7a4a998c87c5674a27089e022110a1a08a7753f21af3baf09efe9915c23c3c"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6300680d83a399be2b8f3b0ef7ef90b35d2a29fe6e9c21438097e0938bbc1564"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:ab69f811a660c362651ae395eba8ce84f84c944cea0df5718ea0ba9d1e4e7252"}, + {file = "simplejson-3.19.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:256e09d0f94d9c3d177d9e95fd27a68c875a4baa2046633df387b86b652f5747"}, + {file = "simplejson-3.19.3-cp313-cp313-win32.whl", hash = "sha256:2c78293470313aefa9cfc5e3f75ca0635721fb016fb1121c1c5b0cb8cc74712a"}, + {file = "simplejson-3.19.3-cp313-cp313-win_amd64.whl", hash = "sha256:3bbcdc438dc1683b35f7a8dc100960c721f922f9ede8127f63bed7dfded4c64c"}, + {file = "simplejson-3.19.3-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:89b35433186e977fa86ff1fd179c1fadff39cfa3afa1648dab0b6ca53153acd9"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d43c2d7504eda566c50203cdc9dc043aff6f55f1b7dae0dcd79dfefef9159d1c"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6890ff9cf0bd2e1d487e2a8869ebd620a44684c0a9667fa5ee751d099d5d84c8"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1069143a8fb3905e1bc0696c62be7e3adf812e9f1976ac9ae15b05112ff57cc9"}, + {file = "simplejson-3.19.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cb324bb903330cbb35d87cce367a12631cd5720afa06e5b9c906483970946da6"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_aarch64.whl", hash = "sha256:0a32859d45d7b85fb803bb68f6bee14526991a1190269116c33399fa0daf9bbf"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_i686.whl", hash = "sha256:23833ee7e791ec968b744dfee2a2d39df7152050051096caf4296506d75608d8"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_ppc64le.whl", hash = "sha256:d73efb03c5b39249c82488a994f0998f9e4399e3d085209d2120503305ba77a8"}, + {file = "simplejson-3.19.3-cp36-cp36m-musllinux_1_2_x86_64.whl", hash = "sha256:7923878b7a0142d39763ec2dbecff3053c1bedd3653585a8474666e420fe83f5"}, + {file = 
"simplejson-3.19.3-cp36-cp36m-win32.whl", hash = "sha256:7355c7203353c36d46c4e7b6055293b3d2be097bbc5e2874a2b8a7259f0325dd"}, + {file = "simplejson-3.19.3-cp36-cp36m-win_amd64.whl", hash = "sha256:d1b8b4d6379fe55f471914345fe6171d81a18649dacf3248abfc9c349b4442eb"}, + {file = "simplejson-3.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d36608557b4dcd7a62c29ad4cd7c5a1720bbf7dc942eff9dc42d2c542a5f042d"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7137e69c6781ecf23afab064be94a277236c9cba31aa48ff1a0ec3995c69171e"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76f8c28fe2d426182405b18ddf3001fce47835a557dc15c3d8bdea01c03361da"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff7bc1bbdaa3e487c9469128bf39408e91f5573901cb852e03af378d3582c52d"}, + {file = "simplejson-3.19.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0782cb9bf827f0c488b6aa0f2819f618308a3caf2973cfd792e45d631bec4db"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:6fea0716c593dabb4392c4996d4e902a83b2428e6da82938cf28a523a11eb277"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:8f41bb5370b34f63171e65fdb00e12be1d83675cecb23e627df26f4c88dfc021"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:37105d1d708365b91165e1a6e505bdecc88637091348cf4b6adcdcb4f5a5fb8b"}, + {file = "simplejson-3.19.3-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:b9198c1f1f8910a3b86b60f4fe2556d9d28d3fefe35bffe6be509a27402e694d"}, + {file = "simplejson-3.19.3-cp37-cp37m-win32.whl", hash = "sha256:bc164f32dd9691e7082ce5df24b4cf8c6c394bbf9bdeeb5d843127cd07ab8ad2"}, + {file = "simplejson-3.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:1bd41f2cb1a2c57656ceff67b12d005cb255c728265e222027ad73193a04005a"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0733ecd95ae03ae718ec74aad818f5af5f3155d596f7b242acbc1621e765e5fb"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4a0710d1a5e41c4f829caa1572793dd3130c8d65c2b194c24ff29c4c305c26e0"}, + {file = "simplejson-3.19.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1a53a07320c5ff574d8b1a89c937ce33608832f166f39dff0581ac43dc979abd"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1773cabfba66a6337b547e45dafbd471b09487370bcab75bd28f626520410d29"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c0104b4b7d2c75ccedbf1d9d5a3bd2daa75e51053935a44ba012e2fd4c43752"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c49eeb94b8f09dc8a5843c156a22b8bde6aa1ddc65ca8ddc62dddcc001e6a2d"}, + {file = "simplejson-3.19.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dc5c1a85ff388e98ea877042daec3d157b6db0d85bac6ba5498034689793e7e"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:49549e3d81ab4a58424405aa545602674d8c35c20e986b42bb8668e782a94bac"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:e1a1452ad5723ff129b081e3c8aa4ba56b8734fee4223355ed7b815a7ece69bc"}, + {file 
= "simplejson-3.19.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:d0d5a63f1768fed7e78cf55712dee81f5a345e34d34224f3507ebf71df2b754d"}, + {file = "simplejson-3.19.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:7e062767ac165df9a46963f5735aa4eee0089ec1e48b3f2ec46182754b96f55e"}, + {file = "simplejson-3.19.3-cp38-cp38-win32.whl", hash = "sha256:56134bbafe458a7b21f6fddbf889d36bec6d903718f4430768e3af822f8e27c2"}, + {file = "simplejson-3.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:bcde83a553a96dc7533736c547bddaa35414a2566ab0ecf7d3964fc4bdb84c11"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b5587feda2b65a79da985ae6d116daf6428bf7489992badc29fc96d16cd27b05"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e0d2b00ecbcd1a3c5ea1abc8bb99a26508f758c1759fd01c3be482a3655a176f"}, + {file = "simplejson-3.19.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:32a3ada8f3ea41db35e6d37b86dade03760f804628ec22e4fe775b703d567426"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f455672f4738b0f47183c5896e3606cd65c9ddee3805a4d18e8c96aa3f47c84"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b737a5fefedb8333fa50b8db3dcc9b1d18fd6c598f89fa7debff8b46bf4e511"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb47ee773ce67476a960e2db4a0a906680c54f662521550828c0cc57d0099426"}, + {file = "simplejson-3.19.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eed8cd98a7b24861da9d3d937f5fbfb6657350c547528a117297fe49e3960667"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:619756f1dd634b5bdf57d9a3914300526c3b348188a765e45b8b08eabef0c94e"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:dd7230d061e755d60a4d5445bae854afe33444cdb182f3815cff26ac9fb29a15"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:101a3c8392028cd704a93c7cba8926594e775ca3c91e0bee82144e34190903f1"}, + {file = "simplejson-3.19.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1e557712fc79f251673aeb3fad3501d7d4da3a27eff0857af2e1d1afbbcf6685"}, + {file = "simplejson-3.19.3-cp39-cp39-win32.whl", hash = "sha256:0bc5544e3128891bf613b9f71813ee2ec9c11574806f74dd8bb84e5e95bf64a2"}, + {file = "simplejson-3.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:06662392e4913dc8846d6a71a6d5de86db5fba244831abe1dd741d62a4136764"}, + {file = "simplejson-3.19.3-py3-none-any.whl", hash = "sha256:49cc4c7b940d43bd12bf87ec63f28cbc4964fc4e12c031cc8cd01650f43eb94e"}, + {file = "simplejson-3.19.3.tar.gz", hash = "sha256:8e086896c36210ab6050f2f9f095a5f1e03c83fa0e7f296d6cba425411364680"}, ] [[package]] @@ -3343,71 +3377,71 @@ files = [ [[package]] name = "soupsieve" -version = "2.5" +version = "2.6" description = "A modern CSS selector implementation for Beautiful Soup." 
optional = true python-versions = ">=3.8" files = [ - {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, - {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, + {file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"}, + {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"}, ] [[package]] name = "sqlalchemy" -version = "2.0.32" +version = "2.0.35" description = "Database Abstraction Library" optional = false python-versions = ">=3.7" files = [ - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0c9045ecc2e4db59bfc97b20516dfdf8e41d910ac6fb667ebd3a79ea54084619"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1467940318e4a860afd546ef61fefb98a14d935cd6817ed07a228c7f7c62f389"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5954463675cb15db8d4b521f3566a017c8789222b8316b1e6934c811018ee08b"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:167e7497035c303ae50651b351c28dc22a40bb98fbdb8468cdc971821b1ae533"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b27dfb676ac02529fb6e343b3a482303f16e6bc3a4d868b73935b8792edb52d0"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:bf2360a5e0f7bd75fa80431bf8ebcfb920c9f885e7956c7efde89031695cafb8"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win32.whl", hash = "sha256:306fe44e754a91cd9d600a6b070c1f2fadbb4a1a257b8781ccf33c7067fd3e4d"}, - {file = "SQLAlchemy-2.0.32-cp310-cp310-win_amd64.whl", hash = "sha256:99db65e6f3ab42e06c318f15c98f59a436f1c78179e6a6f40f529c8cc7100b22"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:21b053be28a8a414f2ddd401f1be8361e41032d2ef5884b2f31d31cb723e559f"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b178e875a7a25b5938b53b006598ee7645172fccafe1c291a706e93f48499ff5"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:723a40ee2cc7ea653645bd4cf024326dea2076673fc9d3d33f20f6c81db83e1d"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:295ff8689544f7ee7e819529633d058bd458c1fd7f7e3eebd0f9268ebc56c2a0"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:49496b68cd190a147118af585173ee624114dfb2e0297558c460ad7495f9dfe2"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:acd9b73c5c15f0ec5ce18128b1fe9157ddd0044abc373e6ecd5ba376a7e5d961"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win32.whl", hash = "sha256:9365a3da32dabd3e69e06b972b1ffb0c89668994c7e8e75ce21d3e5e69ddef28"}, - {file = "SQLAlchemy-2.0.32-cp311-cp311-win_amd64.whl", hash = "sha256:8bd63d051f4f313b102a2af1cbc8b80f061bf78f3d5bd0843ff70b5859e27924"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6bab3db192a0c35e3c9d1560eb8332463e29e5507dbd822e29a0a3c48c0a8d92"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:19d98f4f58b13900d8dec4ed09dd09ef292208ee44cc9c2fe01c1f0a2fe440e9"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:3cd33c61513cb1b7371fd40cf221256456d26a56284e7d19d1f0b9f1eb7dd7e8"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6ba0497c1d066dd004e0f02a92426ca2df20fac08728d03f67f6960271feec"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2b6be53e4fde0065524f1a0a7929b10e9280987b320716c1509478b712a7688c"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:916a798f62f410c0b80b63683c8061f5ebe237b0f4ad778739304253353bc1cb"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win32.whl", hash = "sha256:31983018b74908ebc6c996a16ad3690301a23befb643093fcfe85efd292e384d"}, - {file = "SQLAlchemy-2.0.32-cp312-cp312-win_amd64.whl", hash = "sha256:4363ed245a6231f2e2957cccdda3c776265a75851f4753c60f3004b90e69bfeb"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b8afd5b26570bf41c35c0121801479958b4446751a3971fb9a480c1afd85558e"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c750987fc876813f27b60d619b987b057eb4896b81117f73bb8d9918c14f1cad"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ada0102afff4890f651ed91120c1120065663506b760da4e7823913ebd3258be"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:78c03d0f8a5ab4f3034c0e8482cfcc415a3ec6193491cfa1c643ed707d476f16"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:3bd1cae7519283ff525e64645ebd7a3e0283f3c038f461ecc1c7b040a0c932a1"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win32.whl", hash = "sha256:01438ebcdc566d58c93af0171c74ec28efe6a29184b773e378a385e6215389da"}, - {file = "SQLAlchemy-2.0.32-cp37-cp37m-win_amd64.whl", hash = "sha256:4979dc80fbbc9d2ef569e71e0896990bc94df2b9fdbd878290bd129b65ab579c"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c742be912f57586ac43af38b3848f7688863a403dfb220193a882ea60e1ec3a"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62e23d0ac103bcf1c5555b6c88c114089587bc64d048fef5bbdb58dfd26f96da"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:251f0d1108aab8ea7b9aadbd07fb47fb8e3a5838dde34aa95a3349876b5a1f1d"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ef18a84e5116340e38eca3e7f9eeaaef62738891422e7c2a0b80feab165905f"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3eb6a97a1d39976f360b10ff208c73afb6a4de86dd2a6212ddf65c4a6a2347d5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0c1c9b673d21477cec17ab10bc4decb1322843ba35b481585facd88203754fc5"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win32.whl", hash = "sha256:c41a2b9ca80ee555decc605bd3c4520cc6fef9abde8fd66b1cf65126a6922d65"}, - {file = "SQLAlchemy-2.0.32-cp38-cp38-win_amd64.whl", hash = "sha256:8a37e4d265033c897892279e8adf505c8b6b4075f2b40d77afb31f7185cd6ecd"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52fec964fba2ef46476312a03ec8c425956b05c20220a1a03703537824b5e8e1"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:328429aecaba2aee3d71e11f2477c14eec5990fb6d0e884107935f7fb6001632"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:85a01b5599e790e76ac3fe3aa2f26e1feba56270023d6afd5550ed63c68552b3"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aaf04784797dcdf4c0aa952c8d234fa01974c4729db55c45732520ce12dd95b4"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:4488120becf9b71b3ac718f4138269a6be99a42fe023ec457896ba4f80749525"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:14e09e083a5796d513918a66f3d6aedbc131e39e80875afe81d98a03312889e6"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win32.whl", hash = "sha256:0d322cc9c9b2154ba7e82f7bf25ecc7c36fbe2d82e2933b3642fc095a52cfc78"}, - {file = "SQLAlchemy-2.0.32-cp39-cp39-win_amd64.whl", hash = "sha256:7dd8583df2f98dea28b5cd53a1beac963f4f9d087888d75f22fcc93a07cf8d84"}, - {file = "SQLAlchemy-2.0.32-py3-none-any.whl", hash = "sha256:e567a8793a692451f706b363ccf3c45e056b67d90ead58c3bc9471af5d212202"}, - {file = "SQLAlchemy-2.0.32.tar.gz", hash = "sha256:c1b88cc8b02b6a5f0efb0345a03672d4c897dc7d92585176f88c67346f565ea8"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, + {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, + {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, + {file = 
"SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, + {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f021d334f2ca692523aaf7bbf7592ceff70c8594fad853416a81d66b35e3abf9"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05c3f58cf91683102f2f0265c0db3bd3892e9eedabe059720492dbaa4f922da1"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:032d979ce77a6c2432653322ba4cbeabf5a6837f704d16fa38b5a05d8e21fa00"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:2e795c2f7d7249b75bb5f479b432a51b59041580d20599d4e112b5f2046437a3"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:cc32b2990fc34380ec2f6195f33a76b6cdaa9eecf09f0c9404b74fc120aef36f"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win32.whl", hash = "sha256:9509c4123491d0e63fb5e16199e09f8e262066e58903e84615c301dde8fa2e87"}, + {file = "SQLAlchemy-2.0.35-cp37-cp37m-win_amd64.whl", hash = "sha256:3655af10ebcc0f1e4e06c5900bb33e080d6a1fa4228f502121f28a3b1753cde5"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4c31943b61ed8fdd63dfd12ccc919f2bf95eefca133767db6fbbd15da62078ec"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a62dd5d7cc8626a3634208df458c5fe4f21200d96a74d122c83bc2015b333bc1"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0630774b0977804fba4b6bbea6852ab56c14965a2b0c7fc7282c5f7d90a1ae72"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d625eddf7efeba2abfd9c014a22c0f6b3796e0ffb48f5d5ab106568ef01ff5a"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:ada603db10bb865bbe591939de854faf2c60f43c9b763e90f653224138f910d9"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c41411e192f8d3ea39ea70e0fae48762cd11a2244e03751a98bd3c0ca9a4e936"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win32.whl", hash = "sha256:d299797d75cd747e7797b1b41817111406b8b10a4f88b6e8fe5b5e59598b43b0"}, + {file = "SQLAlchemy-2.0.35-cp38-cp38-win_amd64.whl", hash = "sha256:0375a141e1c0878103eb3d719eb6d5aa444b490c96f3fedab8471c7f6ffe70ee"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_10_9_x86_64.whl", 
hash = "sha256:ccae5de2a0140d8be6838c331604f91d6fafd0735dbdcee1ac78fc8fbaba76b4"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2a275a806f73e849e1c309ac11108ea1a14cd7058577aba962cd7190e27c9e3c"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:732e026240cdd1c1b2e3ac515c7a23820430ed94292ce33806a95869c46bd139"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:890da8cd1941fa3dab28c5bac3b9da8502e7e366f895b3b8e500896f12f94d11"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c0d8326269dbf944b9201911b0d9f3dc524d64779a07518199a58384c3d37a44"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b76d63495b0508ab9fc23f8152bac63205d2a704cd009a2b0722f4c8e0cba8e0"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win32.whl", hash = "sha256:69683e02e8a9de37f17985905a5eca18ad651bf592314b4d3d799029797d0eb3"}, + {file = "SQLAlchemy-2.0.35-cp39-cp39-win_amd64.whl", hash = "sha256:aee110e4ef3c528f3abbc3c2018c121e708938adeeff9006428dd7c8555e9b3f"}, + {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, + {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, ] [package.dependencies] @@ -3568,24 +3602,24 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, [[package]] name = "types-awscrt" -version = "0.21.2" +version = "0.21.5" description = "Type annotations and code completion for awscrt" optional = false -python-versions = "<4.0,>=3.7" +python-versions = ">=3.8" files = [ - {file = "types_awscrt-0.21.2-py3-none-any.whl", hash = "sha256:0839fe12f0f914d8f7d63ed777c728cb4eccc2d5d79a26e377d12b0604e7bf0e"}, - {file = "types_awscrt-0.21.2.tar.gz", hash = "sha256:84a9f4f422ec525c314fdf54c23a1e73edfbcec968560943ca2d41cfae623b38"}, + {file = "types_awscrt-0.21.5-py3-none-any.whl", hash = "sha256:117ff2b1bb657f09d01b7e0ce3fe3fa6e039be12d30b826896182725c9ce85b1"}, + {file = "types_awscrt-0.21.5.tar.gz", hash = "sha256:9f7f47de68799cb2bcb9e486f48d77b9f58962b92fba43cb8860da70b3c57d1b"}, ] [[package]] name = "types-pyasn1" -version = "0.6.0.20240402" +version = "0.6.0.20240913" description = "Typing stubs for pyasn1" optional = false python-versions = ">=3.8" files = [ - {file = "types-pyasn1-0.6.0.20240402.tar.gz", hash = "sha256:5d54dcb33f69dd269071ca098e923ac20c5f03c814631fa7f3ed9ee035a5da3a"}, - {file = "types_pyasn1-0.6.0.20240402-py3-none-any.whl", hash = "sha256:848d01e7313c200acc035a8b3d377fe7b2aecbe77f2be49eb160a7f82835aaaf"}, + {file = "types-pyasn1-0.6.0.20240913.tar.gz", hash = "sha256:a1da054db13d3d4ccfa69c515678154014336ad3d9f9ade01845f9edb1a2bc71"}, + {file = "types_pyasn1-0.6.0.20240913-py3-none-any.whl", hash = "sha256:95f3cb1fbd63ff91cd0410945f8aeae6b0be359533c00f39d8e17124884157af"}, ] [[package]] @@ -3604,24 +3638,24 @@ types-pyasn1 = "*" [[package]] name = "types-pytz" -version = "2024.1.0.20240417" +version = "2024.2.0.20240913" description = "Typing stubs for pytz" optional = false python-versions = ">=3.8" files = [ - {file = "types-pytz-2024.1.0.20240417.tar.gz", hash = "sha256:6810c8a1f68f21fdf0f4f374a432487c77645a0ac0b31de4bf4690cf21ad3981"}, - {file = "types_pytz-2024.1.0.20240417-py3-none-any.whl", hash = "sha256:8335d443310e2db7b74e007414e74c4f53b67452c0cb0d228ca359ccfba59659"}, + {file = 
"types-pytz-2024.2.0.20240913.tar.gz", hash = "sha256:4433b5df4a6fc587bbed41716d86a5ba5d832b4378e506f40d34bc9c81df2c24"}, + {file = "types_pytz-2024.2.0.20240913-py3-none-any.whl", hash = "sha256:a1eebf57ebc6e127a99d2fa2ba0a88d2b173784ef9b3defcc2004ab6855a44df"}, ] [[package]] name = "types-pyyaml" -version = "6.0.12.20240808" +version = "6.0.12.20240917" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240808.tar.gz", hash = "sha256:b8f76ddbd7f65440a8bda5526a9607e4c7a322dc2f8e1a8c405644f9a6f4b9af"}, - {file = "types_PyYAML-6.0.12.20240808-py3-none-any.whl", hash = "sha256:deda34c5c655265fc517b546c902aa6eed2ef8d3e921e4765fe606fe2afe8d35"}, + {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, + {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, ] [[package]] @@ -3654,13 +3688,13 @@ urllib3 = ">=2" [[package]] name = "types-s3transfer" -version = "0.10.1" +version = "0.10.2" description = "Type annotations and code completion for s3transfer" optional = false -python-versions = "<4.0,>=3.8" +python-versions = ">=3.8" files = [ - {file = "types_s3transfer-0.10.1-py3-none-any.whl", hash = "sha256:49a7c81fa609ac1532f8de3756e64b58afcecad8767933310228002ec7adff74"}, - {file = "types_s3transfer-0.10.1.tar.gz", hash = "sha256:02154cce46528287ad76ad1a0153840e0492239a0887e8833466eccf84b98da0"}, + {file = "types_s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:7a3fec8cd632e2b5efb665a355ef93c2a87fdd5a45b74a949f95a9e628a86356"}, + {file = "types_s3transfer-0.10.2.tar.gz", hash = "sha256:60167a3bfb5c536ec6cdb5818f7f9a28edca9dc3e0b5ff85ae374526fc5e576e"}, ] [[package]] @@ -3687,24 +3721,24 @@ files = [ [[package]] name = "tzdata" -version = "2024.1" +version = "2024.2" description = "Provider of IANA time zone data" optional = false python-versions = ">=2" files = [ - {file = "tzdata-2024.1-py2.py3-none-any.whl", hash = "sha256:9068bc196136463f5245e51efda838afa15aaeca9903f49050dfa2679db4d252"}, - {file = "tzdata-2024.1.tar.gz", hash = "sha256:2674120f8d891909751c38abcdfd386ac0a5a1127954fbc332af6b5ceae07efd"}, + {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, + {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, ] [[package]] name = "urllib3" -version = "1.26.19" +version = "1.26.20" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ - {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"}, - {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"}, + {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, + {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, ] [package.extras] @@ -3714,13 +3748,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "urllib3" -version = "2.2.2" +version = "2.2.3" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false python-versions = ">=3.8" files = [ - {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, - {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, + {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, + {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, ] [package.extras] @@ -3731,13 +3765,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.30.5" +version = "0.31.0" description = "The lightning-fast ASGI server." optional = true python-versions = ">=3.8" files = [ - {file = "uvicorn-0.30.5-py3-none-any.whl", hash = "sha256:b2d86de274726e9878188fa07576c9ceeff90a839e2b6e25c917fe05f5a6c835"}, - {file = "uvicorn-0.30.5.tar.gz", hash = "sha256:ac6fdbd4425c5fd17a9fe39daf4d4d075da6fdc80f653e5894cdc2fd98752bee"}, + {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, + {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, ] [package.dependencies] @@ -3757,42 +3791,42 @@ standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", [[package]] name = "uvloop" -version = "0.19.0" +version = "0.20.0" description = "Fast implementation of asyncio event loop on top of libuv" optional = true python-versions = ">=3.8.0" files = [ - {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"}, - {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"}, - {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"}, - {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"}, - {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"}, - {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"}, - {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"}, - {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"}, - {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"}, - {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"}, - {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"}, - {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"}, - {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"}, - {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"}, - {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"}, - {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"}, - {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"}, - {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"}, - {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"}, - {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"}, - {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"}, - {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"}, - {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"}, - {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"}, - {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"}, - {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"}, - {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"}, - {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"}, - {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"}, - {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"}, - {file = "uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"}, + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9ebafa0b96c62881d5cafa02d9da2e44c23f9f0cd829f3a32a6aff771449c996"}, + {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:35968fc697b0527a06e134999eef859b4034b37aebca537daeb598b9d45a137b"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b16696f10e59d7580979b420eedf6650010a4a9c3bd8113f24a103dfdb770b10"}, + {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b04d96188d365151d1af41fa2d23257b674e7ead68cfd61c725a422764062ae"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = 
"sha256:94707205efbe809dfa3a0d09c08bef1352f5d3d6612a506f10a319933757c006"}, + {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89e8d33bb88d7263f74dc57d69f0063e06b5a5ce50bb9a6b32f5fcbe655f9e73"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e50289c101495e0d1bb0bfcb4a60adde56e32f4449a67216a1ab2750aa84f037"}, + {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e237f9c1e8a00e7d9ddaa288e535dc337a39bcbf679f290aee9d26df9e72bce9"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:746242cd703dc2b37f9d8b9f173749c15e9a918ddb021575a0205ec29a38d31e"}, + {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82edbfd3df39fb3d108fc079ebc461330f7c2e33dbd002d146bf7c445ba6e756"}, + {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80dc1b139516be2077b3e57ce1cb65bfed09149e1d175e0478e7a987863b68f0"}, + {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f44af67bf39af25db4c1ac27e82e9665717f9c26af2369c404be865c8818dcf"}, + {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4b75f2950ddb6feed85336412b9a0c310a2edbcf4cf931aa5cfe29034829676d"}, + {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:77fbc69c287596880ecec2d4c7a62346bef08b6209749bf6ce8c22bbaca0239e"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6462c95f48e2d8d4c993a2950cd3d31ab061864d1c226bbf0ee2f1a8f36674b9"}, + {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649c33034979273fa71aa25d0fe120ad1777c551d8c4cd2c0c9851d88fcb13ab"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a609780e942d43a275a617c0839d85f95c334bad29c4c0918252085113285b5"}, + {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aea15c78e0d9ad6555ed201344ae36db5c63d428818b4b2a42842b3870127c00"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0e94b221295b5e69de57a1bd4aeb0b3a29f61be6e1b478bb8a69a73377db7ba"}, + {file = "uvloop-0.20.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fee6044b64c965c425b65a4e17719953b96e065c5b7e09b599ff332bb2744bdf"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:265a99a2ff41a0fd56c19c3838b29bf54d1d177964c300dad388b27e84fd7847"}, + {file = "uvloop-0.20.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10c2956efcecb981bf9cfb8184d27d5d64b9033f917115a960b83f11bfa0d6b"}, + {file = "uvloop-0.20.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e7d61fe8e8d9335fac1bf8d5d82820b4808dd7a43020c149b63a1ada953d48a6"}, + {file = "uvloop-0.20.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2beee18efd33fa6fdb0976e18475a4042cd31c7433c866e8a09ab604c7c22ff2"}, + {file = "uvloop-0.20.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d8c36fdf3e02cec92aed2d44f63565ad1522a499c654f07935c8f9d04db69e95"}, + {file = "uvloop-0.20.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0fac7be202596c7126146660725157d4813aa29a4cc990fe51346f75ff8fde7"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0fba61846f294bce41eb44d60d58136090ea2b5b99efd21cbdf4e21927c56a"}, + {file = "uvloop-0.20.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:95720bae002ac357202e0d866128eb1ac82545bcf0b549b9abe91b5178d9b541"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:36c530d8fa03bfa7085af54a48f2ca16ab74df3ec7108a46ba82fd8b411a2315"}, + {file = "uvloop-0.20.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e97152983442b499d7a71e44f29baa75b3b02e65d9c44ba53b10338e98dedb66"}, + {file = "uvloop-0.20.0.tar.gz", hash = "sha256:4603ca714a754fc8d9b197e325db25b2ea045385e8a3ad05d3463de725fdf469"}, ] [package.extras] @@ -3801,13 +3835,13 @@ test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)" [[package]] name = "virtualenv" -version = "20.26.3" +version = "20.26.6" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.7" files = [ - {file = "virtualenv-20.26.3-py3-none-any.whl", hash = "sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589"}, - {file = "virtualenv-20.26.3.tar.gz", hash = "sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a"}, + {file = "virtualenv-20.26.6-py3-none-any.whl", hash = "sha256:7345cc5b25405607a624d8418154577459c3e0277f5466dd79c49d5e492995f2"}, + {file = "virtualenv-20.26.6.tar.gz", hash = "sha256:280aede09a2a5c317e409a00102e7077c6432c5a38f0ef938e643805a7ad2c48"}, ] [package.dependencies] @@ -3821,98 +3855,94 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "watchfiles" -version = "0.23.0" +version = "0.24.0" description = "Simple, modern and high performance file watching and code reload in python." optional = true python-versions = ">=3.8" files = [ - {file = "watchfiles-0.23.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:bee8ce357a05c20db04f46c22be2d1a2c6a8ed365b325d08af94358e0688eeb4"}, - {file = "watchfiles-0.23.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4ccd3011cc7ee2f789af9ebe04745436371d36afe610028921cab9f24bb2987b"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb02d41c33be667e6135e6686f1bb76104c88a312a18faa0ef0262b5bf7f1a0f"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cf12ac34c444362f3261fb3ff548f0037ddd4c5bb85f66c4be30d2936beb3c5"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0b2c25040a3c0ce0e66c7779cc045fdfbbb8d59e5aabfe033000b42fe44b53e"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf2be4b9eece4f3da8ba5f244b9e51932ebc441c0867bd6af46a3d97eb068d6"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40cb8fa00028908211eb9f8d47744dca21a4be6766672e1ff3280bee320436f1"}, - {file = "watchfiles-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f48c917ffd36ff9a5212614c2d0d585fa8b064ca7e66206fb5c095015bc8207"}, - {file = "watchfiles-0.23.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9d183e3888ada88185ab17064079c0db8c17e32023f5c278d7bf8014713b1b5b"}, - {file = "watchfiles-0.23.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9837edf328b2805346f91209b7e660f65fb0e9ca18b7459d075d58db082bf981"}, - {file = "watchfiles-0.23.0-cp310-none-win32.whl", hash = "sha256:296e0b29ab0276ca59d82d2da22cbbdb39a23eed94cca69aed274595fb3dfe42"}, - {file = "watchfiles-0.23.0-cp310-none-win_amd64.whl", hash = "sha256:4ea756e425ab2dfc8ef2a0cb87af8aa7ef7dfc6fc46c6f89bcf382121d4fff75"}, - 
{file = "watchfiles-0.23.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:e397b64f7aaf26915bf2ad0f1190f75c855d11eb111cc00f12f97430153c2eab"}, - {file = "watchfiles-0.23.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b4ac73b02ca1824ec0a7351588241fd3953748d3774694aa7ddb5e8e46aef3e3"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:130a896d53b48a1cecccfa903f37a1d87dbb74295305f865a3e816452f6e49e4"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c5e7803a65eb2d563c73230e9d693c6539e3c975ccfe62526cadde69f3fda0cf"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d1aa4cc85202956d1a65c88d18c7b687b8319dbe6b1aec8969784ef7a10e7d1a"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87f889f6e58849ddb7c5d2cb19e2e074917ed1c6e3ceca50405775166492cca8"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37fd826dac84c6441615aa3f04077adcc5cac7194a021c9f0d69af20fb9fa788"}, - {file = "watchfiles-0.23.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee7db6e36e7a2c15923072e41ea24d9a0cf39658cb0637ecc9307b09d28827e1"}, - {file = "watchfiles-0.23.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:2368c5371c17fdcb5a2ea71c5c9d49f9b128821bfee69503cc38eae00feb3220"}, - {file = "watchfiles-0.23.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:857af85d445b9ba9178db95658c219dbd77b71b8264e66836a6eba4fbf49c320"}, - {file = "watchfiles-0.23.0-cp311-none-win32.whl", hash = "sha256:1d636c8aeb28cdd04a4aa89030c4b48f8b2954d8483e5f989774fa441c0ed57b"}, - {file = "watchfiles-0.23.0-cp311-none-win_amd64.whl", hash = "sha256:46f1d8069a95885ca529645cdbb05aea5837d799965676e1b2b1f95a4206313e"}, - {file = "watchfiles-0.23.0-cp311-none-win_arm64.whl", hash = "sha256:e495ed2a7943503766c5d1ff05ae9212dc2ce1c0e30a80d4f0d84889298fa304"}, - {file = "watchfiles-0.23.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1db691bad0243aed27c8354b12d60e8e266b75216ae99d33e927ff5238d270b5"}, - {file = "watchfiles-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:62d2b18cb1edaba311fbbfe83fb5e53a858ba37cacb01e69bc20553bb70911b8"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e087e8fdf1270d000913c12e6eca44edd02aad3559b3e6b8ef00f0ce76e0636f"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dd41d5c72417b87c00b1b635738f3c283e737d75c5fa5c3e1c60cd03eac3af77"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e5f3ca0ff47940ce0a389457b35d6df601c317c1e1a9615981c474452f98de1"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6991e3a78f642368b8b1b669327eb6751439f9f7eaaa625fae67dd6070ecfa0b"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f7252f52a09f8fa5435dc82b6af79483118ce6bd51eb74e6269f05ee22a7b9f"}, - {file = "watchfiles-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e01bcb8d767c58865207a6c2f2792ad763a0fe1119fb0a430f444f5b02a5ea0"}, - {file = "watchfiles-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:8e56fbcdd27fce061854ddec99e015dd779cae186eb36b14471fc9ae713b118c"}, - {file = 
"watchfiles-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bd3e2d64500a6cad28bcd710ee6269fbeb2e5320525acd0cfab5f269ade68581"}, - {file = "watchfiles-0.23.0-cp312-none-win32.whl", hash = "sha256:eb99c954291b2fad0eff98b490aa641e128fbc4a03b11c8a0086de8b7077fb75"}, - {file = "watchfiles-0.23.0-cp312-none-win_amd64.whl", hash = "sha256:dccc858372a56080332ea89b78cfb18efb945da858fabeb67f5a44fa0bcb4ebb"}, - {file = "watchfiles-0.23.0-cp312-none-win_arm64.whl", hash = "sha256:6c21a5467f35c61eafb4e394303720893066897fca937bade5b4f5877d350ff8"}, - {file = "watchfiles-0.23.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:ba31c32f6b4dceeb2be04f717811565159617e28d61a60bb616b6442027fd4b9"}, - {file = "watchfiles-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:85042ab91814fca99cec4678fc063fb46df4cbb57b4835a1cc2cb7a51e10250e"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24655e8c1c9c114005c3868a3d432c8aa595a786b8493500071e6a52f3d09217"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6b1a950ab299a4a78fd6369a97b8763732bfb154fdb433356ec55a5bce9515c1"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8d3c5cd327dd6ce0edfc94374fb5883d254fe78a5e9d9dfc237a1897dc73cd1"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ff785af8bacdf0be863ec0c428e3288b817e82f3d0c1d652cd9c6d509020dd0"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b7ba9d4557149410747353e7325010d48edcfe9d609a85cb450f17fd50dc3d"}, - {file = "watchfiles-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48a1b05c0afb2cd2f48c1ed2ae5487b116e34b93b13074ed3c22ad5c743109f0"}, - {file = "watchfiles-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:109a61763e7318d9f821b878589e71229f97366fa6a5c7720687d367f3ab9eef"}, - {file = "watchfiles-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9f8e6bb5ac007d4a4027b25f09827ed78cbbd5b9700fd6c54429278dacce05d1"}, - {file = "watchfiles-0.23.0-cp313-none-win32.whl", hash = "sha256:f46c6f0aec8d02a52d97a583782d9af38c19a29900747eb048af358a9c1d8e5b"}, - {file = "watchfiles-0.23.0-cp313-none-win_amd64.whl", hash = "sha256:f449afbb971df5c6faeb0a27bca0427d7b600dd8f4a068492faec18023f0dcff"}, - {file = "watchfiles-0.23.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:2dddc2487d33e92f8b6222b5fb74ae2cfde5e8e6c44e0248d24ec23befdc5366"}, - {file = "watchfiles-0.23.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e75695cc952e825fa3e0684a7f4a302f9128721f13eedd8dbd3af2ba450932b8"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2537ef60596511df79b91613a5bb499b63f46f01a11a81b0a2b0dedf645d0a9c"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20b423b58f5fdde704a226b598a2d78165fe29eb5621358fe57ea63f16f165c4"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b98732ec893975455708d6fc9a6daab527fc8bbe65be354a3861f8c450a632a4"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee1f5fcbf5bc33acc0be9dd31130bcba35d6d2302e4eceafafd7d9018c7755ab"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:a8f195338a5a7b50a058522b39517c50238358d9ad8284fd92943643144c0c03"}, - {file = "watchfiles-0.23.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524fcb8d59b0dbee2c9b32207084b67b2420f6431ed02c18bd191e6c575f5c48"}, - {file = "watchfiles-0.23.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0eff099a4df36afaa0eea7a913aa64dcf2cbd4e7a4f319a73012210af4d23810"}, - {file = "watchfiles-0.23.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a8323daae27ea290ba3350c70c836c0d2b0fb47897fa3b0ca6a5375b952b90d3"}, - {file = "watchfiles-0.23.0-cp38-none-win32.whl", hash = "sha256:aafea64a3ae698695975251f4254df2225e2624185a69534e7fe70581066bc1b"}, - {file = "watchfiles-0.23.0-cp38-none-win_amd64.whl", hash = "sha256:c846884b2e690ba62a51048a097acb6b5cd263d8bd91062cd6137e2880578472"}, - {file = "watchfiles-0.23.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a753993635eccf1ecb185dedcc69d220dab41804272f45e4aef0a67e790c3eb3"}, - {file = "watchfiles-0.23.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6bb91fa4d0b392f0f7e27c40981e46dda9eb0fbc84162c7fb478fe115944f491"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1f67312efa3902a8e8496bfa9824d3bec096ff83c4669ea555c6bdd213aa516"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7ca6b71dcc50d320c88fb2d88ecd63924934a8abc1673683a242a7ca7d39e781"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aec5c29915caf08771d2507da3ac08e8de24a50f746eb1ed295584ba1820330"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1733b9bc2c8098c6bdb0ff7a3d7cb211753fecb7bd99bdd6df995621ee1a574b"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02ff5d7bd066c6a7673b17c8879cd8ee903078d184802a7ee851449c43521bdd"}, - {file = "watchfiles-0.23.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18e2de19801b0eaa4c5292a223effb7cfb43904cb742c5317a0ac686ed604765"}, - {file = "watchfiles-0.23.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8ada449e22198c31fb013ae7e9add887e8d2bd2335401abd3cbc55f8c5083647"}, - {file = "watchfiles-0.23.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3af1b05361e1cc497bf1be654a664750ae61f5739e4bb094a2be86ec8c6db9b6"}, - {file = "watchfiles-0.23.0-cp39-none-win32.whl", hash = "sha256:486bda18be5d25ab5d932699ceed918f68eb91f45d018b0343e3502e52866e5e"}, - {file = "watchfiles-0.23.0-cp39-none-win_amd64.whl", hash = "sha256:d2d42254b189a346249424fb9bb39182a19289a2409051ee432fb2926bad966a"}, - {file = "watchfiles-0.23.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9265cf87a5b70147bfb2fec14770ed5b11a5bb83353f0eee1c25a81af5abfe"}, - {file = "watchfiles-0.23.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9f02a259fcbbb5fcfe7a0805b1097ead5ba7a043e318eef1db59f93067f0b49b"}, - {file = "watchfiles-0.23.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebaebb53b34690da0936c256c1cdb0914f24fb0e03da76d185806df9328abed"}, - {file = "watchfiles-0.23.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd257f98cff9c6cb39eee1a83c7c3183970d8a8d23e8cf4f47d9a21329285cee"}, - {file = "watchfiles-0.23.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:aba037c1310dd108411d27b3d5815998ef0e83573e47d4219f45753c710f969f"}, - {file = 
"watchfiles-0.23.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:a96ac14e184aa86dc43b8a22bb53854760a58b2966c2b41580de938e9bf26ed0"}, - {file = "watchfiles-0.23.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11698bb2ea5e991d10f1f4f83a39a02f91e44e4bd05f01b5c1ec04c9342bf63c"}, - {file = "watchfiles-0.23.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efadd40fca3a04063d40c4448c9303ce24dd6151dc162cfae4a2a060232ebdcb"}, - {file = "watchfiles-0.23.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:556347b0abb4224c5ec688fc58214162e92a500323f50182f994f3ad33385dcb"}, - {file = "watchfiles-0.23.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:1cf7f486169986c4b9d34087f08ce56a35126600b6fef3028f19ca16d5889071"}, - {file = "watchfiles-0.23.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f18de0f82c62c4197bea5ecf4389288ac755896aac734bd2cc44004c56e4ac47"}, - {file = "watchfiles-0.23.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:532e1f2c491274d1333a814e4c5c2e8b92345d41b12dc806cf07aaff786beb66"}, - {file = "watchfiles-0.23.0.tar.gz", hash = "sha256:9338ade39ff24f8086bb005d16c29f8e9f19e55b18dcb04dfa26fcbc09da497b"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, + {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, + {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, + {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, + {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, + {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, + {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, + {file = 
"watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8360f7314a070c30e4c976b183d1d8d1585a4a50c5cb603f431cebcbb4f66327"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:316449aefacf40147a9efaf3bd7c9bdd35aaba9ac5d708bd1eb5763c9a02bef5"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73bde715f940bea845a95247ea3e5eb17769ba1010efdc938ffcb967c634fa61"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3770e260b18e7f4e576edca4c0a639f704088602e0bc921c5c2e721e3acb8d15"}, + {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa0fd7248cf533c259e59dc593a60973a73e881162b1a2f73360547132742823"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d7a2e3b7f5703ffbd500dabdefcbc9eafeff4b9444bbdd5d83d79eedf8428fab"}, + {file = "watchfiles-0.24.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d831ee0a50946d24a53821819b2327d5751b0c938b12c0653ea5be7dea9c82ec"}, + {file = "watchfiles-0.24.0-cp311-none-win32.whl", hash = "sha256:49d617df841a63b4445790a254013aea2120357ccacbed00253f9c2b5dc24e2d"}, + {file = "watchfiles-0.24.0-cp311-none-win_amd64.whl", hash = "sha256:d3dcb774e3568477275cc76554b5a565024b8ba3a0322f77c246bc7111c5bb9c"}, + {file = "watchfiles-0.24.0-cp311-none-win_arm64.whl", hash = "sha256:9301c689051a4857d5b10777da23fafb8e8e921bcf3abe6448a058d27fb67633"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:7211b463695d1e995ca3feb38b69227e46dbd03947172585ecb0588f19b0d87a"}, + {file = "watchfiles-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4b8693502d1967b00f2fb82fc1e744df128ba22f530e15b763c8d82baee15370"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdab9555053399318b953a1fe1f586e945bc8d635ce9d05e617fd9fe3a4687d6"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:34e19e56d68b0dad5cff62273107cf5d9fbaf9d75c46277aa5d803b3ef8a9e9b"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:41face41f036fee09eba33a5b53a73e9a43d5cb2c53dad8e61fa6c9f91b5a51e"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5148c2f1ea043db13ce9b0c28456e18ecc8f14f41325aa624314095b6aa2e9ea"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e4bd963a935aaf40b625c2499f3f4f6bbd0c3776f6d3bc7c853d04824ff1c9f"}, + {file = "watchfiles-0.24.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c79d7719d027b7a42817c5d96461a99b6a49979c143839fc37aa5748c322f234"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:32aa53a9a63b7f01ed32e316e354e81e9da0e6267435c7243bf8ae0f10b428ef"}, + {file = "watchfiles-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce72dba6a20e39a0c628258b5c308779b8697f7676c254a845715e2a1039b968"}, + {file = "watchfiles-0.24.0-cp312-none-win32.whl", hash = "sha256:d9018153cf57fc302a2a34cb7564870b859ed9a732d16b41a9b5cb2ebed2d444"}, + {file = 
"watchfiles-0.24.0-cp312-none-win_amd64.whl", hash = "sha256:551ec3ee2a3ac9cbcf48a4ec76e42c2ef938a7e905a35b42a1267fa4b1645896"}, + {file = "watchfiles-0.24.0-cp312-none-win_arm64.whl", hash = "sha256:b52a65e4ea43c6d149c5f8ddb0bef8d4a1e779b77591a458a893eb416624a418"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:3d2e3ab79a1771c530233cadfd277fcc762656d50836c77abb2e5e72b88e3a48"}, + {file = "watchfiles-0.24.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327763da824817b38ad125dcd97595f942d720d32d879f6c4ddf843e3da3fe90"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd82010f8ab451dabe36054a1622870166a67cf3fce894f68895db6f74bbdc94"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d64ba08db72e5dfd5c33be1e1e687d5e4fcce09219e8aee893a4862034081d4e"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1cf1f6dd7825053f3d98f6d33f6464ebdd9ee95acd74ba2c34e183086900a827"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:43e3e37c15a8b6fe00c1bce2473cfa8eb3484bbeecf3aefbf259227e487a03df"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88bcd4d0fe1d8ff43675360a72def210ebad3f3f72cabfeac08d825d2639b4ab"}, + {file = "watchfiles-0.24.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:999928c6434372fde16c8f27143d3e97201160b48a614071261701615a2a156f"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:30bbd525c3262fd9f4b1865cb8d88e21161366561cd7c9e1194819e0a33ea86b"}, + {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, + {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, + {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:ee82c98bed9d97cd2f53bdb035e619309a098ea53ce525833e26b93f673bc318"}, + {file = "watchfiles-0.24.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:fd92bbaa2ecdb7864b7600dcdb6f2f1db6e0346ed425fbd01085be04c63f0b05"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f83df90191d67af5a831da3a33dd7628b02a95450e168785586ed51e6d28943c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fca9433a45f18b7c779d2bae7beeec4f740d28b788b117a48368d95a3233ed83"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b995bfa6bf01a9e09b884077a6d37070464b529d8682d7691c2d3b540d357a0c"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ed9aba6e01ff6f2e8285e5aa4154e2970068fe0fc0998c4380d0e6278222269b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5171ef898299c657685306d8e1478a45e9303ddcd8ac5fed5bd52ad4ae0b69b"}, + {file = "watchfiles-0.24.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4933a508d2f78099162da473841c652ad0de892719043d3f07cc83b33dfd9d91"}, + {file = 
"watchfiles-0.24.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:95cf3b95ea665ab03f5a54765fa41abf0529dbaf372c3b83d91ad2cfa695779b"}, + {file = "watchfiles-0.24.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:01def80eb62bd5db99a798d5e1f5f940ca0a05986dcfae21d833af7a46f7ee22"}, + {file = "watchfiles-0.24.0-cp38-none-win32.whl", hash = "sha256:4d28cea3c976499475f5b7a2fec6b3a36208656963c1a856d328aeae056fc5c1"}, + {file = "watchfiles-0.24.0-cp38-none-win_amd64.whl", hash = "sha256:21ab23fdc1208086d99ad3f69c231ba265628014d4aed31d4e8746bd59e88cd1"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b665caeeda58625c3946ad7308fbd88a086ee51ccb706307e5b1fa91556ac886"}, + {file = "watchfiles-0.24.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5c51749f3e4e269231510da426ce4a44beb98db2dce9097225c338f815b05d4f"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82b2509f08761f29a0fdad35f7e1638b8ab1adfa2666d41b794090361fb8b855"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a60e2bf9dc6afe7f743e7c9b149d1fdd6dbf35153c78fe3a14ae1a9aee3d98b"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f7d9b87c4c55e3ea8881dfcbf6d61ea6775fffed1fedffaa60bd047d3c08c430"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78470906a6be5199524641f538bd2c56bb809cd4bf29a566a75051610bc982c3"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:07cdef0c84c03375f4e24642ef8d8178e533596b229d32d2bbd69e5128ede02a"}, + {file = "watchfiles-0.24.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d337193bbf3e45171c8025e291530fb7548a93c45253897cd764a6a71c937ed9"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ec39698c45b11d9694a1b635a70946a5bad066b593af863460a8e600f0dff1ca"}, + {file = "watchfiles-0.24.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e28d91ef48eab0afb939fa446d8ebe77e2f7593f5f463fd2bb2b14132f95b6e"}, + {file = "watchfiles-0.24.0-cp39-none-win32.whl", hash = "sha256:7138eff8baa883aeaa074359daabb8b6c1e73ffe69d5accdc907d62e50b1c0da"}, + {file = "watchfiles-0.24.0-cp39-none-win_amd64.whl", hash = "sha256:b3ef2c69c655db63deb96b3c3e587084612f9b1fa983df5e0c3379d41307467f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, + {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:96619302d4374de5e2345b2b622dc481257a99431277662c30f606f3e22f42be"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:85d5f0c7771dcc7a26c7a27145059b6bb0ce06e4e751ed76cdf123d7039b60b5"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:951088d12d339690a92cef2ec5d3cfd957692834c72ffd570ea76a6790222777"}, + {file = "watchfiles-0.24.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49fb58bcaa343fedc6a9e91f90195b20ccb3135447dc9e4e2570c3a39565853e"}, + {file = "watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, ] [package.dependencies] @@ -3959,83 +3989,97 @@ files = [ [[package]] name = "websockets" -version = "12.0" +version = "13.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" optional = true python-versions = ">=3.8" files = [ - {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, - {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, - {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, - {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, - {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, - {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, - {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, - {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, - {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"}, - {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"}, - {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"}, - {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"}, - {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"}, - {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"}, - {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"}, - {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"}, - {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"}, - {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"}, - {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"}, - {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"}, - {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"}, - {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"}, - {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, - {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, - {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, - {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"}, - {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"}, - {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"}, - {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"}, - {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"}, - {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"}, - {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = 
"sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"}, - {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"}, - {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"}, - {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"}, - {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"}, - {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"}, - {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"}, - {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"}, - {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"}, - {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"}, - {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"}, - {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"}, - {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"}, - {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"}, - {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"}, - {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"}, - {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, - {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, - {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"}, - {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"}, - {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"}, - {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"}, - {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"}, - {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"}, - {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"}, - {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"}, - {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"}, - {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"}, - {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, - {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f48c749857f8fb598fb890a75f540e3221d0976ed0bf879cf3c7eef34151acee"}, + {file = "websockets-13.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c7e72ce6bda6fb9409cc1e8164dd41d7c91466fb599eb047cfda72fe758a34a7"}, + {file = "websockets-13.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f779498eeec470295a2b1a5d97aa1bc9814ecd25e1eb637bd9d1c73a327387f6"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4676df3fe46956fbb0437d8800cd5f2b6d41143b6e7e842e60554398432cf29b"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7affedeb43a70351bb811dadf49493c9cfd1ed94c9c70095fd177e9cc1541fa"}, + {file = "websockets-13.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1971e62d2caa443e57588e1d82d15f663b29ff9dfe7446d9964a4b6f12c1e700"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5f2e75431f8dc4a47f31565a6e1355fb4f2ecaa99d6b89737527ea917066e26c"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:58cf7e75dbf7e566088b07e36ea2e3e2bd5676e22216e4cad108d4df4a7402a0"}, + {file = "websockets-13.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c90d6dec6be2c7d03378a574de87af9b1efea77d0c52a8301dd831ece938452f"}, + {file = "websockets-13.1-cp310-cp310-win32.whl", hash = "sha256:730f42125ccb14602f455155084f978bd9e8e57e89b569b4d7f0f0c17a448ffe"}, + {file = "websockets-13.1-cp310-cp310-win_amd64.whl", hash = "sha256:5993260f483d05a9737073be197371940c01b257cc45ae3f1d5d7adb371b266a"}, + {file = 
"websockets-13.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:61fc0dfcda609cda0fc9fe7977694c0c59cf9d749fbb17f4e9483929e3c48a19"}, + {file = "websockets-13.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ceec59f59d092c5007e815def4ebb80c2de330e9588e101cf8bd94c143ec78a5"}, + {file = "websockets-13.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c1dca61c6db1166c48b95198c0b7d9c990b30c756fc2923cc66f68d17dc558fd"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308e20f22c2c77f3f39caca508e765f8725020b84aa963474e18c59accbf4c02"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62d516c325e6540e8a57b94abefc3459d7dab8ce52ac75c96cad5549e187e3a7"}, + {file = "websockets-13.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87c6e35319b46b99e168eb98472d6c7d8634ee37750d7693656dc766395df096"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:5f9fee94ebafbc3117c30be1844ed01a3b177bb6e39088bc6b2fa1dc15572084"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7c1e90228c2f5cdde263253fa5db63e6653f1c00e7ec64108065a0b9713fa1b3"}, + {file = "websockets-13.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6548f29b0e401eea2b967b2fdc1c7c7b5ebb3eeb470ed23a54cd45ef078a0db9"}, + {file = "websockets-13.1-cp311-cp311-win32.whl", hash = "sha256:c11d4d16e133f6df8916cc5b7e3e96ee4c44c936717d684a94f48f82edb7c92f"}, + {file = "websockets-13.1-cp311-cp311-win_amd64.whl", hash = "sha256:d04f13a1d75cb2b8382bdc16ae6fa58c97337253826dfe136195b7f89f661557"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:9d75baf00138f80b48f1eac72ad1535aac0b6461265a0bcad391fc5aba875cfc"}, + {file = "websockets-13.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:9b6f347deb3dcfbfde1c20baa21c2ac0751afaa73e64e5b693bb2b848efeaa49"}, + {file = "websockets-13.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:de58647e3f9c42f13f90ac7e5f58900c80a39019848c5547bc691693098ae1bd"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1b54689e38d1279a51d11e3467dd2f3a50f5f2e879012ce8f2d6943f00e83f0"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf1781ef73c073e6b0f90af841aaf98501f975d306bbf6221683dd594ccc52b6"}, + {file = "websockets-13.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d23b88b9388ed85c6faf0e74d8dec4f4d3baf3ecf20a65a47b836d56260d4b9"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3c78383585f47ccb0fcf186dcb8a43f5438bd7d8f47d69e0b56f71bf431a0a68"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d6d300f8ec35c24025ceb9b9019ae9040c1ab2f01cddc2bcc0b518af31c75c14"}, + {file = "websockets-13.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a9dcaf8b0cc72a392760bb8755922c03e17a5a54e08cca58e8b74f6902b433cf"}, + {file = "websockets-13.1-cp312-cp312-win32.whl", hash = "sha256:2f85cf4f2a1ba8f602298a853cec8526c2ca42a9a4b947ec236eaedb8f2dc80c"}, + {file = "websockets-13.1-cp312-cp312-win_amd64.whl", hash = "sha256:38377f8b0cdeee97c552d20cf1865695fcd56aba155ad1b4ca8779a5b6ef4ac3"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_universal2.whl", 
hash = "sha256:a9ab1e71d3d2e54a0aa646ab6d4eebfaa5f416fe78dfe4da2839525dc5d765c6"}, + {file = "websockets-13.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b9d7439d7fab4dce00570bb906875734df13d9faa4b48e261c440a5fec6d9708"}, + {file = "websockets-13.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:327b74e915cf13c5931334c61e1a41040e365d380f812513a255aa804b183418"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:325b1ccdbf5e5725fdcb1b0e9ad4d2545056479d0eee392c291c1bf76206435a"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:346bee67a65f189e0e33f520f253d5147ab76ae42493804319b5716e46dddf0f"}, + {file = "websockets-13.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91a0fa841646320ec0d3accdff5b757b06e2e5c86ba32af2e0815c96c7a603c5"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:18503d2c5f3943e93819238bf20df71982d193f73dcecd26c94514f417f6b135"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9cd1af7e18e5221d2878378fbc287a14cd527fdd5939ed56a18df8a31136bb2"}, + {file = "websockets-13.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:70c5be9f416aa72aab7a2a76c90ae0a4fe2755c1816c153c1a2bcc3333ce4ce6"}, + {file = "websockets-13.1-cp313-cp313-win32.whl", hash = "sha256:624459daabeb310d3815b276c1adef475b3e6804abaf2d9d2c061c319f7f187d"}, + {file = "websockets-13.1-cp313-cp313-win_amd64.whl", hash = "sha256:c518e84bb59c2baae725accd355c8dc517b4a3ed8db88b4bc93c78dae2974bf2"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c7934fd0e920e70468e676fe7f1b7261c1efa0d6c037c6722278ca0228ad9d0d"}, + {file = "websockets-13.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:149e622dc48c10ccc3d2760e5f36753db9cacf3ad7bc7bbbfd7d9c819e286f23"}, + {file = "websockets-13.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a569eb1b05d72f9bce2ebd28a1ce2054311b66677fcd46cf36204ad23acead8c"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:95df24ca1e1bd93bbca51d94dd049a984609687cb2fb08a7f2c56ac84e9816ea"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d8dbb1bf0c0a4ae8b40bdc9be7f644e2f3fb4e8a9aca7145bfa510d4a374eeb7"}, + {file = "websockets-13.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:035233b7531fb92a76beefcbf479504db8c72eb3bff41da55aecce3a0f729e54"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:e4450fc83a3df53dec45922b576e91e94f5578d06436871dce3a6be38e40f5db"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:463e1c6ec853202dd3657f156123d6b4dad0c546ea2e2e38be2b3f7c5b8e7295"}, + {file = "websockets-13.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:6d6855bbe70119872c05107e38fbc7f96b1d8cb047d95c2c50869a46c65a8e96"}, + {file = "websockets-13.1-cp38-cp38-win32.whl", hash = "sha256:204e5107f43095012b00f1451374693267adbb832d29966a01ecc4ce1db26faf"}, + {file = "websockets-13.1-cp38-cp38-win_amd64.whl", hash = "sha256:485307243237328c022bc908b90e4457d0daa8b5cf4b3723fd3c4a8012fce4c6"}, + {file = "websockets-13.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9b37c184f8b976f0c0a231a5f3d6efe10807d41ccbe4488df8c74174805eea7d"}, 
+ {file = "websockets-13.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:163e7277e1a0bd9fb3c8842a71661ad19c6aa7bb3d6678dc7f89b17fbcc4aeb7"}, + {file = "websockets-13.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4b889dbd1342820cc210ba44307cf75ae5f2f96226c0038094455a96e64fb07a"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:586a356928692c1fed0eca68b4d1c2cbbd1ca2acf2ac7e7ebd3b9052582deefa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7bd6abf1e070a6b72bfeb71049d6ad286852e285f146682bf30d0296f5fbadfa"}, + {file = "websockets-13.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d2aad13a200e5934f5a6767492fb07151e1de1d6079c003ab31e1823733ae79"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:df01aea34b6e9e33572c35cd16bae5a47785e7d5c8cb2b54b2acdb9678315a17"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e54affdeb21026329fb0744ad187cf812f7d3c2aa702a5edb562b325191fcab6"}, + {file = "websockets-13.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9ef8aa8bdbac47f4968a5d66462a2a0935d044bf35c0e5a8af152d58516dbeb5"}, + {file = "websockets-13.1-cp39-cp39-win32.whl", hash = "sha256:deeb929efe52bed518f6eb2ddc00cc496366a14c726005726ad62c2dd9017a3c"}, + {file = "websockets-13.1-cp39-cp39-win_amd64.whl", hash = "sha256:7c65ffa900e7cc958cd088b9a9157a8141c991f8c53d11087e6fb7277a03f81d"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5dd6da9bec02735931fccec99d97c29f47cc61f644264eb995ad6c0c27667238"}, + {file = "websockets-13.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:2510c09d8e8df777177ee3d40cd35450dc169a81e747455cc4197e63f7e7bfe5"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1c3cf67185543730888b20682fb186fc8d0fa6f07ccc3ef4390831ab4b388d9"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bcc03c8b72267e97b49149e4863d57c2d77f13fae12066622dc78fe322490fe6"}, + {file = "websockets-13.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:004280a140f220c812e65f36944a9ca92d766b6cc4560be652a0a3883a79ed8a"}, + {file = "websockets-13.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e2620453c075abeb0daa949a292e19f56de518988e079c36478bacf9546ced23"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9156c45750b37337f7b0b00e6248991a047be4aa44554c9886fe6bdd605aab3b"}, + {file = "websockets-13.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:80c421e07973a89fbdd93e6f2003c17d20b69010458d3a8e37fb47874bd67d51"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82d0ba76371769d6a4e56f7e83bb8e81846d17a6190971e38b5de108bde9b0d7"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9875a0143f07d74dc5e1ded1c4581f0d9f7ab86c78994e2ed9e95050073c94d"}, + {file = "websockets-13.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a11e38ad8922c7961447f35c7b17bffa15de4d17c70abd07bfbe12d6faa3e027"}, + {file = 
"websockets-13.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4059f790b6ae8768471cddb65d3c4fe4792b0ab48e154c9f0a04cefaabcd5978"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:25c35bf84bf7c7369d247f0b8cfa157f989862c49104c5cf85cb5436a641d93e"}, + {file = "websockets-13.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:83f91d8a9bb404b8c2c41a707ac7f7f75b9442a0a876df295de27251a856ad09"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7a43cfdcddd07f4ca2b1afb459824dd3c6d53a51410636a2c7fc97b9a8cf4842"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48a2ef1381632a2f0cb4efeff34efa97901c9fbc118e01951ad7cfc10601a9bb"}, + {file = "websockets-13.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:459bf774c754c35dbb487360b12c5727adab887f1622b8aed5755880a21c4a20"}, + {file = "websockets-13.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:95858ca14a9f6fa8413d29e0a585b31b278388aa775b8a81fa24830123874678"}, + {file = "websockets-13.1-py3-none-any.whl", hash = "sha256:a9a396a6ad26130cdae92ae10c36af09d9bfe6cafe69670fd3b6da9b07b4044f"}, + {file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"}, ] [[package]] @@ -4061,18 +4105,22 @@ pyodbc = ["pyodbc"] [[package]] name = "zipp" -version = "3.20.0" +version = "3.20.2" description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" files = [ - {file = "zipp-3.20.0-py3-none-any.whl", hash = "sha256:58da6168be89f0be59beb194da1250516fdaa062ccebd30127ac65d30045e10d"}, - {file = "zipp-3.20.0.tar.gz", hash = "sha256:0145e43d89664cfe1a2e533adc75adafed82fe2da404b4bbb6b026c0157bdb31"}, + {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, + {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, ] [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"] +type = ["pytest-mypy"] [extras] server = ["alembic", "arq", "authlib", "boto3", "cryptography", "email-validator", "fastapi", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "slack-sdk", "uvicorn", "watchtower"] @@ -4080,4 +4128,4 @@ server = ["alembic", "arq", "authlib", "boto3", "cryptography", "email-validator [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "4d11304f6faddc9525e8861e6b5819f7945e6dcfd3f90fe77b5070ab894a6e70" +content-hash = "2de247df51a9bf90242c2e716970dba3ab2ad1d9e27a4b225968f8873e128e55" diff --git a/pyproject.toml b/pyproject.toml index b1865dff..66a4d921 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -45,16 +45,16 
@@ starlette-context = "^0.3.6" # Optional dependencies for running this application as a server alembic = { version = "~1.7.6", optional = true } arq = { version = "~0.25.0", optional = true } -authlib = { version = "~0.15.5", optional = true } +authlib = { version = "~1.3.1", optional = true } boto3 = { version = "~1.34.97", optional = true } -cryptography = { version = "~41.0.6", optional = true } +cryptography = { version = "~43.0.1", optional = true } email-validator = { version = "~2.1.1", optional = true } fastapi = { version = "~0.95.0", optional = true } orcid = { version = "~1.0.3", optional = true } psycopg2 = { version = "~2.9.3", optional = true } python-jose = { extras = ["cryptography"], version = "~3.3.0", optional = true } python-multipart = { version = "~0.0.5", optional = true } -requests = { version = "~2.31.0", optional = true } +requests = { version = "~2.32.0", optional = true } slack-sdk = { version = "~3.21.3", optional = true } uvicorn = { extras = ["standard"], version = "*", optional = true } watchtower = { version = "~3.2.0", optional = true } @@ -70,7 +70,7 @@ mypy = "~1.10.0" pre-commit = "*" jsonschema = "*" fakeredis = "~2.21.1" -pytest = "~7.0.1" +pytest = "~7.2.0" pytest-postgresql = "~5.0.0" pytest-asyncio = "~0.23.5" pytest-socket = "~0.6.0" From c65eac62a61a932e565e132827f228a06204ca3b Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Wed, 2 Oct 2024 16:59:39 +1000 Subject: [PATCH 23/58] Add router functions to check whether users have authorization in experiment, experiment set and score set. Add some related tests. --- src/mavedb/routers/experiment_sets.py | 37 ++++++++++++++++++++++ src/mavedb/routers/experiments.py | 35 +++++++++++++++++++++ src/mavedb/routers/score_sets.py | 34 ++++++++++++++++++++ tests/routers/test_experiment_set.py | 45 +++++++++++++++++++++++++++ tests/routers/test_experiments.py | 35 +++++++++++++++++++++ tests/routers/test_score_set.py | 38 ++++++++++++++++++++++ 6 files changed, 224 insertions(+) create mode 100644 tests/routers/test_experiment_set.py diff --git a/src/mavedb/routers/experiment_sets.py b/src/mavedb/routers/experiment_sets.py index a5060ec9..4a797c9a 100644 --- a/src/mavedb/routers/experiment_sets.py +++ b/src/mavedb/routers/experiment_sets.py @@ -4,10 +4,13 @@ from fastapi import APIRouter, Depends, HTTPException from sqlalchemy.orm import Session +from sqlalchemy import or_ from mavedb import deps +from mavedb.lib.authentication import get_current_user, UserData from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context +from mavedb.models.contributor import Contributor from mavedb.models.experiment_set import ExperimentSet from mavedb.view_models import experiment_set @@ -21,6 +24,40 @@ logger = logging.getLogger(__name__) +@router.get( + "/check-authorizations/{urn}", + status_code=200, + response_model=bool +) +async def check_experiment_set_authorization( + *, + urn: str, + db: Session = Depends(deps.get_db), + user_data: UserData = Depends(get_current_user), +) -> bool: + """ + Check whether users have authorizations in this experiment set. 
+ """ + query = db.query(ExperimentSet).filter(ExperimentSet.urn == urn) + + if user_data is not None: + query = query.filter( + or_( + ExperimentSet.created_by_id == user_data.user.id, + ExperimentSet.contributors.any(Contributor.orcid_id == user_data.user.username), + ) + ) + else: + return False + + save_to_logging_context({"Experiment set requested resource": urn}) + item = query.first() + if item: + return True + else: + return False + + @router.get( "/{urn}", status_code=200, diff --git a/src/mavedb/routers/experiments.py b/src/mavedb/routers/experiments.py index 08237c7e..fbd625e4 100644 --- a/src/mavedb/routers/experiments.py +++ b/src/mavedb/routers/experiments.py @@ -45,6 +45,41 @@ ) +@router.get( + "/experiments/check-authorizations/{urn}", + status_code=200, + response_model=bool +) +async def check_experiment_authorization( + *, + urn: str, + db: Session = Depends(deps.get_db), + user_data: UserData = Depends(get_current_user), +) -> bool: + """ + Check whether users have authorizations in this experiment. + """ + query = db.query(Experiment).filter(Experiment.urn == urn) + + if user_data is not None: + query = query.filter( + or_( + Experiment.created_by_id == user_data.user.id, + Experiment.contributors.any(Contributor.orcid_id == user_data.user.username), + ) + ) + else: + return False + + save_to_logging_context({"Experiment requested resource": urn}) + item = query.first() + + if item: + return True + else: + return False + + # TODO: Rewrite this function. @router.get( "/experiments/", diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index 44fd2a30..dcf74f45 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -113,6 +113,40 @@ async def fetch_score_set_by_urn( ) +@router.get( + "/score-sets/check-authorizations/{urn}", + status_code=200, + response_model=bool +) +async def check_score_set_authorization( + *, + urn: str, + db: Session = Depends(deps.get_db), + user_data: UserData = Depends(get_current_user), +) -> bool: + """ + Check whether users have authorizations in this score set. 
+ """ + query = db.query(ScoreSet).filter(ScoreSet.urn == urn) + + if user_data is not None: + query = query.filter( + or_( + ScoreSet.created_by_id == user_data.user.id, + ScoreSet.contributors.any(Contributor.orcid_id == user_data.user.username), + ) + ) + else: + return False + + save_to_logging_context({"Score set requested resource": urn}) + item = query.first() + if item: + return True + else: + return False + + @router.post("/score-sets/search", status_code=200, response_model=list[score_set.ShortScoreSet]) def search_score_sets(search: ScoreSetsSearch, db: Session = Depends(deps.get_db)) -> Any: # = Body(..., embed=True), """ diff --git a/tests/routers/test_experiment_set.py b/tests/routers/test_experiment_set.py new file mode 100644 index 00000000..734e42ec --- /dev/null +++ b/tests/routers/test_experiment_set.py @@ -0,0 +1,45 @@ +from tests.helpers.constants import TEST_USER +from tests.helpers.util import ( + add_contributor, + change_ownership, + create_experiment, +) +from mavedb.models.experiment import Experiment as ExperimentDbModel +from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel + + +def test_get_true_authorization_from_own_experiment_set_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) + add_contributor( + session, + experiment["experimentSetUrn"], + ExperimentSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_experiment_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) + + response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") + + assert response.status_code == 200 + assert response.json() == False \ No newline at end of file diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index b04d1625..c8e362af 100644 --- a/tests/routers/test_experiments.py +++ b/tests/routers/test_experiments.py @@ -559,6 +559,41 @@ def test_admin_can_update_other_users_public_experiment_set( assert response_data["title"] == "Second Experiment" +def test_get_true_authorization_from_own_experiment_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + add_contributor( + session, + experiment["urn"], + ExperimentDbModel, + 
TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_experiment_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + + response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") + + assert response.status_code == 200 + assert response.json() == False + + def test_edit_preserves_optional_metadata(client, setup_router_db): pass diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index e0db83c2..4ec8fd1d 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -496,6 +496,44 @@ def test_admin_can_add_scores_and_counts_to_other_user_score_set(session, client assert score_set == response_data +def test_get_true_authorization_from_own_score_set_check(client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_score_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_score_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") + + assert response.status_code == 200 + assert response.json() == False + + def test_publish_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) score_set = create_seq_score_set_with_variants( From 280edd4149b6f0f40f9a557376f1581f4ef625e9 Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Fri, 4 Oct 2024 16:33:45 +1000 Subject: [PATCH 24/58] Modify original check authorization function. Based on this new function, add more tests and move original tests to one file. 
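A minimal sketch of the call pattern for the consolidated endpoint added here
(illustrative only, not committed code: it assumes the suite's `client` and
`setup_router_db` fixtures and the helpers these tests already use, and
"frobnicate" is a deliberately invalid action name):

    # Sketch only: relies on the test suite's authenticated TestClient fixture
    # and its helper functions; "frobnicate" is a deliberately invalid action.
    from tests.helpers.util import create_experiment, create_seq_score_set

    def test_user_is_authorized_call_pattern(client, setup_router_db):
        experiment = create_experiment(client)
        score_set = create_seq_score_set(client, experiment["urn"])

        # 200 with a JSON boolean: the creator may update their own score set.
        response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update")
        assert response.status_code == 200
        assert response.json() is True

        # An action name outside the Action enum is rejected with 400 ...
        response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/frobnicate")
        assert response.status_code == 400

        # ... and an unknown URN (or model name) yields 404.
        response = client.get("/api/v1/user-is-authorized/score-set/invalidUrn/update")
        assert response.status_code == 404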
---
 src/mavedb/routers/authorization.py   |  71 ++++++
 src/mavedb/routers/experiment_sets.py |  36 ---
 src/mavedb/routers/experiments.py     |  36 ---
 src/mavedb/routers/score_sets.py      |  34 ---
 src/mavedb/server_main.py             |   2 +
 tests/routers/test_authorization.py   | 331 ++++++++++++++++++++++++++
 tests/routers/test_experiment_set.py  |  45 ----
 tests/routers/test_experiments.py     |  35 ---
 tests/routers/test_score_set.py       |  38 ---
 9 files changed, 404 insertions(+), 224 deletions(-)
 create mode 100644 src/mavedb/routers/authorization.py
 create mode 100644 tests/routers/test_authorization.py
 delete mode 100644 tests/routers/test_experiment_set.py

diff --git a/src/mavedb/routers/authorization.py b/src/mavedb/routers/authorization.py
new file mode 100644
index 00000000..583101e4
--- /dev/null
+++ b/src/mavedb/routers/authorization.py
@@ -0,0 +1,71 @@
+import logging
+from enum import Enum
+
+from fastapi import APIRouter, Depends, HTTPException
+from sqlalchemy.orm import Session
+
+from mavedb import deps
+from mavedb.lib.authentication import get_current_user, UserData
+from mavedb.lib.permissions import has_permission, Action
+from mavedb.lib.logging import LoggedRoute
+from mavedb.lib.logging.context import logging_context, save_to_logging_context
+from mavedb.models.experiment import Experiment
+from mavedb.models.experiment_set import ExperimentSet
+from mavedb.models.score_set import ScoreSet
+
+router = APIRouter(
+    prefix="/api/v1",
+    tags=["authorizations"],
+    responses={404: {"description": "Not found"}},
+    route_class=LoggedRoute,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class ModelName(str, Enum):
+    experiment = "experiment"
+    experiment_set = "experiment-set"
+    score_set = "score-set"
+
+
+@router.get(
+    "/user-is-authorized/{model_name}/{urn}/{action}",
+    status_code=200,
+    response_model=bool
+)
+async def check_authorization(
+    *,
+    model_name: str,
+    urn: str,
+    action: str,
+    db: Session = Depends(deps.get_db),
+    user_data: UserData = Depends(get_current_user),
+) -> bool:
+    """
+    Check whether users have authorizations in adding/editing/deleting/publishing experiment or score set.
+    """
+    save_to_logging_context({"requested_resource": urn})
+    if model_name == ModelName.experiment_set:
+        item = db.query(ExperimentSet).filter(ExperimentSet.urn == urn).one_or_none()
+    elif model_name == ModelName.experiment:
+        item = db.query(Experiment).filter(Experiment.urn == urn).one_or_none()
+    elif model_name == ModelName.score_set:
+        item = db.query(ScoreSet).filter(ScoreSet.urn == urn).one_or_none()
+    else:
+        item = None
+
+    if item:
+        if user_data:
+            try:
+                action_enum = Action[action.upper()]
+                permission = has_permission(user_data, item, action_enum).permitted
+                return permission
+            except KeyError:
+                raise HTTPException(status_code=400, detail=f"Invalid action: {action}")
+        else:
+            logger.debug(msg="Miss user data", extra=logging_context())
+            raise HTTPException(status_code=404, detail=f"User not found")
+    else:
+        logger.debug(msg="The requested resources does not exist.", extra=logging_context())
+        raise HTTPException(status_code=404, detail=f"{model_name} with URN '{urn}' not found")
diff --git a/src/mavedb/routers/experiment_sets.py b/src/mavedb/routers/experiment_sets.py
index eb235619..eeaaf34c 100644
--- a/src/mavedb/routers/experiment_sets.py
+++ b/src/mavedb/routers/experiment_sets.py
@@ -4,14 +4,12 @@
 from fastapi import APIRouter, Depends, HTTPException
 from sqlalchemy.orm import Session
-from sqlalchemy import or_
 
 from mavedb import deps
 from mavedb.lib.authentication import get_current_user, UserData
 from mavedb.lib.permissions import has_permission, Action
 from mavedb.lib.logging import LoggedRoute
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
-from mavedb.models.contributor import Contributor
 from mavedb.models.experiment_set import ExperimentSet
 
 from mavedb.view_models import experiment_set
 
@@ -25,40 +23,6 @@
 logger = logging.getLogger(__name__)
 
 
-@router.get(
-    "/check-authorizations/{urn}",
-    status_code=200,
-    response_model=bool
-)
-async def check_experiment_set_authorization(
-    *,
-    urn: str,
-    db: Session = Depends(deps.get_db),
-    user_data: UserData = Depends(get_current_user),
-) -> bool:
-    """
-    Check whether users have authorizations in this experiment set.
-    """
-    query = db.query(ExperimentSet).filter(ExperimentSet.urn == urn)
-
-    if user_data is not None:
-        query = query.filter(
-            or_(
-                ExperimentSet.created_by_id == user_data.user.id,
-                ExperimentSet.contributors.any(Contributor.orcid_id == user_data.user.username),
-            )
-        )
-    else:
-        return False
-
-    save_to_logging_context({"Experiment set requested resource": urn})
-    item = query.first()
-    if item:
-        return True
-    else:
-        return False
-
-
 @router.get(
     "/{urn}",
     status_code=200,
diff --git a/src/mavedb/routers/experiments.py b/src/mavedb/routers/experiments.py
index e8a846b3..6ca47430 100644
--- a/src/mavedb/routers/experiments.py
+++ b/src/mavedb/routers/experiments.py
@@ -5,7 +5,6 @@
 from fastapi import APIRouter, Depends, HTTPException
 from fastapi.encoders import jsonable_encoder
 import pydantic
-from sqlalchemy import or_, and_
 from sqlalchemy.orm import Session
 
 from mavedb import deps
@@ -45,41 +44,6 @@
 )
 
 
-@router.get(
-    "/experiments/check-authorizations/{urn}",
-    status_code=200,
-    response_model=bool
-)
-async def check_experiment_authorization(
-    *,
-    urn: str,
-    db: Session = Depends(deps.get_db),
-    user_data: UserData = Depends(get_current_user),
-) -> bool:
-    """
-    Check whether users have authorizations in this experiment.
- """ - query = db.query(Experiment).filter(Experiment.urn == urn) - - if user_data is not None: - query = query.filter( - or_( - Experiment.created_by_id == user_data.user.id, - Experiment.contributors.any(Contributor.orcid_id == user_data.user.username), - ) - ) - else: - return False - - save_to_logging_context({"Experiment requested resource": urn}) - item = query.first() - - if item: - return True - else: - return False - - # TODO: Rewrite this function. @router.get( "/experiments/", diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index dcf74f45..44fd2a30 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -113,40 +113,6 @@ async def fetch_score_set_by_urn( ) -@router.get( - "/score-sets/check-authorizations/{urn}", - status_code=200, - response_model=bool -) -async def check_score_set_authorization( - *, - urn: str, - db: Session = Depends(deps.get_db), - user_data: UserData = Depends(get_current_user), -) -> bool: - """ - Check whether users have authorizations in this score set. - """ - query = db.query(ScoreSet).filter(ScoreSet.urn == urn) - - if user_data is not None: - query = query.filter( - or_( - ScoreSet.created_by_id == user_data.user.id, - ScoreSet.contributors.any(Contributor.orcid_id == user_data.user.username), - ) - ) - else: - return False - - save_to_logging_context({"Score set requested resource": urn}) - item = query.first() - if item: - return True - else: - return False - - @router.post("/score-sets/search", status_code=200, response_model=list[score_set.ShortScoreSet]) def search_score_sets(search: ScoreSetsSearch, db: Session = Depends(deps.get_db)) -> Any: # = Body(..., embed=True), """ diff --git a/src/mavedb/server_main.py b/src/mavedb/server_main.py index 8514fcae..597528f5 100644 --- a/src/mavedb/server_main.py +++ b/src/mavedb/server_main.py @@ -26,6 +26,7 @@ from mavedb.lib.logging.canonical import log_request from mavedb.routers import ( access_keys, + authorization, api_information, controlled_keywords, doi_identifiers, @@ -73,6 +74,7 @@ ) app.include_router(access_keys.router) app.include_router(api_information.router) +app.include_router(authorization.router) app.include_router(controlled_keywords.router) app.include_router(doi_identifiers.router) app.include_router(experiment_sets.router) diff --git a/tests/routers/test_authorization.py b/tests/routers/test_authorization.py new file mode 100644 index 00000000..52fd0893 --- /dev/null +++ b/tests/routers/test_authorization.py @@ -0,0 +1,331 @@ +from tests.helpers.constants import TEST_USER +from tests.helpers.util import ( + add_contributor, + change_ownership, + create_experiment, + create_seq_score_set, +) +from mavedb.models.experiment import Experiment as ExperimentDbModel +from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel +from mavedb.models.score_set import ScoreSet as ScoreSetDbModel + + +# Test check_authorization function +# Experiment set tests +def test_get_true_authorization_from_own_experiment_set_add_experiment_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], 
ExperimentDbModel) + change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) + add_contributor( + session, + experiment["experimentSetUrn"], + ExperimentSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_experiment_set_add_experiment_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) + + response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + + assert response.status_code == 200 + assert response.json() == False + + +def test_cannot_get_authorization_with_wrong_action_in_experiment_set(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/edit") + + assert response.status_code == 400 + response_data = response.json() + assert response_data["detail"] == "Invalid action: edit" + + +def test_cannot_get_authorization_with_non_existing_experiment_set(client, setup_router_db): + response = client.get(f"/api/v1/user-is-authorized/experiment-set/invalidUrn/update") + + assert response.status_code == 404 + response_data = response.json() + assert response_data["detail"] == "experiment-set with URN 'invalidUrn' not found" + + +# Experiment tests +def test_get_true_authorization_from_own_experiment_update_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_true_authorization_from_own_experiment_delete_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_true_authorization_from_own_experiment_add_score_set_check(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_update_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + add_contributor( + session, + experiment["urn"], + ExperimentDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_delete_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + add_contributor( + session, + experiment["urn"], + ExperimentDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + 
TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_experiment_add_score_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + add_contributor( + session, + experiment["urn"], + ExperimentDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_experiment_add_score_set_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + + assert response.status_code == 200 + assert response.json() == False + + +def test_get_false_authorization_from_other_users_experiment_update_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + + assert response.status_code == 200 + assert response.json() == False + + +def test_get_false_authorization_from_other_users_experiment_delete_check(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == False + + +def test_cannot_get_authorization_with_wrong_action_in_experiment(client, setup_router_db): + experiment = create_experiment(client) + response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/invalidAction") + + assert response.status_code == 400 + response_data = response.json() + assert response_data["detail"] == "Invalid action: invalidAction" + + +def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): + response = client.get(f"/api/v1/user-is-authorized/experiment/invalidUrn/update") + + assert response.status_code == 404 + response_data = response.json() + assert response_data["detail"] == "experiment with URN 'invalidUrn' not found" + + +# Score set tests +def test_get_true_authorization_from_own_score_set_update_check(client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_true_authorization_from_own_score_set_delete_check(client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_true_authorization_from_own_score_set_publish_check(client, setup_router_db): + experiment = create_experiment(client) + score_set = 
create_seq_score_set(client, experiment["urn"]) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_score_set_update_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_score_set_delete_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == True + + +def test_contributor_gets_true_authorization_from_others_score_set_publish_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + add_contributor( + session, + score_set["urn"], + ScoreSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + + assert response.status_code == 200 + assert response.json() == True + + +def test_get_false_authorization_from_other_users_score_set_delete_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + + assert response.status_code == 200 + assert response.json() == False + + +def test_get_false_authorization_from_other_users_score_set_update_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + + assert response.status_code == 200 + assert response.json() == False + + +def test_get_false_authorization_from_other_users_score_set_publish_check(session, client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + change_ownership(session, score_set["urn"], ScoreSetDbModel) + + response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + + assert response.status_code == 200 + assert response.json() == False + + +def test_cannot_get_authorization_with_wrong_action_in_score_set(client, setup_router_db): + experiment = create_experiment(client) + score_set = create_seq_score_set(client, experiment["urn"]) + 
response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/invalidAction") + + assert response.status_code == 400 + response_data = response.json() + assert response_data["detail"] == "Invalid action: invalidAction" + + +def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): + response = client.get(f"/api/v1/user-is-authorized/score-set/invalidUrn/update") + + assert response.status_code == 404 + response_data = response.json() + assert response_data["detail"] == "score-set with URN 'invalidUrn' not found" + + +# Common invalid test +def test_cannot_get_authorization_with_non_existing_item(client, setup_router_db): + response = client.get(f"/api/v1/user-is-authorized/invalidModel/invalidUrn/update") + + assert response.status_code == 404 + response_data = response.json() + assert response_data["detail"] == "invalidModel with URN 'invalidUrn' not found" diff --git a/tests/routers/test_experiment_set.py b/tests/routers/test_experiment_set.py deleted file mode 100644 index 734e42ec..00000000 --- a/tests/routers/test_experiment_set.py +++ /dev/null @@ -1,45 +0,0 @@ -from tests.helpers.constants import TEST_USER -from tests.helpers.util import ( - add_contributor, - change_ownership, - create_experiment, -) -from mavedb.models.experiment import Experiment as ExperimentDbModel -from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel - - -def test_get_true_authorization_from_own_experiment_set_check(client, setup_router_db): - experiment = create_experiment(client) - response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def test_contributor_gets_true_authorization_from_others_experiment_set_check(session, client, setup_router_db): - experiment = create_experiment(client) - change_ownership(session, experiment["urn"], ExperimentDbModel) - change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) - add_contributor( - session, - experiment["experimentSetUrn"], - ExperimentSetDbModel, - TEST_USER["username"], - TEST_USER["first_name"], - TEST_USER["last_name"], - ) - response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def test_get_false_authorization_from_other_users_experiment_set_check(session, client, setup_router_db): - experiment = create_experiment(client) - change_ownership(session, experiment["urn"], ExperimentDbModel) - change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) - - response = client.get(f"/api/v1/experiment-sets/check-authorizations/{experiment['experimentSetUrn']}") - - assert response.status_code == 200 - assert response.json() == False \ No newline at end of file diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index 6241ed3a..51c29073 100644 --- a/tests/routers/test_experiments.py +++ b/tests/routers/test_experiments.py @@ -562,41 +562,6 @@ def test_admin_can_update_other_users_public_experiment_set( assert response_data["title"] == "Second Experiment" -def test_get_true_authorization_from_own_experiment_check(client, setup_router_db): - experiment = create_experiment(client) - response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def 
test_contributor_gets_true_authorization_from_others_experiment_check(session, client, setup_router_db): - experiment = create_experiment(client) - change_ownership(session, experiment["urn"], ExperimentDbModel) - add_contributor( - session, - experiment["urn"], - ExperimentDbModel, - TEST_USER["username"], - TEST_USER["first_name"], - TEST_USER["last_name"], - ) - response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def test_get_false_authorization_from_other_users_experiment_check(session, client, setup_router_db): - experiment = create_experiment(client) - change_ownership(session, experiment["urn"], ExperimentDbModel) - - response = client.get(f"/api/v1/experiments/check-authorizations/{experiment['urn']}") - - assert response.status_code == 200 - assert response.json() == False - - def test_edit_preserves_optional_metadata(client, setup_router_db): pass diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index 4599b84e..b9cbc5b5 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -496,44 +496,6 @@ def test_admin_can_add_scores_and_counts_to_other_user_score_set(session, client assert score_set == response_data -def test_get_true_authorization_from_own_score_set_check(client, setup_router_db): - experiment = create_experiment(client) - score_set = create_seq_score_set(client, experiment["urn"]) - response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def test_contributor_gets_true_authorization_from_others_score_set_check(session, client, setup_router_db): - experiment = create_experiment(client) - score_set = create_seq_score_set(client, experiment["urn"]) - change_ownership(session, score_set["urn"], ScoreSetDbModel) - add_contributor( - session, - score_set["urn"], - ScoreSetDbModel, - TEST_USER["username"], - TEST_USER["first_name"], - TEST_USER["last_name"], - ) - response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") - - assert response.status_code == 200 - assert response.json() == True - - -def test_get_false_authorization_from_other_users_score_set_check(session, client, setup_router_db): - experiment = create_experiment(client) - score_set = create_seq_score_set(client, experiment["urn"]) - change_ownership(session, score_set["urn"], ScoreSetDbModel) - - response = client.get(f"/api/v1/score-sets/check-authorizations/{score_set['urn']}") - - assert response.status_code == 200 - assert response.json() == False - - def test_publish_score_set(session, data_provider, client, setup_router_db, data_files): experiment = create_experiment(client) score_set = create_seq_score_set_with_variants( From 83d2bf01f17fd7d0c79074f40a144355ca45efe4 Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Fri, 4 Oct 2024 17:02:38 +1000 Subject: [PATCH 25/58] Solve poetry error --- src/mavedb/routers/authorization.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/mavedb/routers/authorization.py b/src/mavedb/routers/authorization.py index 583101e4..3c5559d0 100644 --- a/src/mavedb/routers/authorization.py +++ b/src/mavedb/routers/authorization.py @@ -3,6 +3,7 @@ from fastapi import APIRouter, Depends, HTTPException from sqlalchemy.orm import Session +from typing import Union, Optional from mavedb import deps from mavedb.lib.authentication import get_current_user, UserData @@ 
-46,14 +47,15 @@ async def check_authorization( Check whether users have authorizations in adding/editing/deleting/publishing experiment or score set. """ save_to_logging_context({"requested_resource": urn}) + + item: Optional[Union[ExperimentSet, Experiment, ScoreSet]] = None + if model_name == ModelName.experiment_set: item = db.query(ExperimentSet).filter(ExperimentSet.urn == urn).one_or_none() elif model_name == ModelName.experiment: item = db.query(Experiment).filter(Experiment.urn == urn).one_or_none() elif model_name == ModelName.score_set: item = db.query(ScoreSet).filter(ScoreSet.urn == urn).one_or_none() - else: - item = None if item: if user_data: From 23fd482ccf84e5066d53c629b7a4576602b96976 Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Tue, 8 Oct 2024 17:32:17 +1100 Subject: [PATCH 26/58] Modify codes and tests. --- src/mavedb/lib/permissions.py | 16 ++-- .../{authorization.py => permissions.py} | 20 ++--- src/mavedb/server_main.py | 4 +- ...t_authorization.py => test_permissions.py} | 74 ++++++++++--------- 4 files changed, 56 insertions(+), 58 deletions(-) rename src/mavedb/routers/{authorization.py => permissions.py} (75%) rename tests/routers/{test_authorization.py => test_permissions.py} (70%) diff --git a/src/mavedb/lib/permissions.py b/src/mavedb/lib/permissions.py index db3b05e1..7bbe3e56 100644 --- a/src/mavedb/lib/permissions.py +++ b/src/mavedb/lib/permissions.py @@ -15,14 +15,14 @@ class Action(Enum): - READ = 1 - UPDATE = 2 - DELETE = 3 - ADD_EXPERIMENT = 4 - ADD_SCORE_SET = 5 - SET_SCORES = 6 - ADD_ROLE = 7 - PUBLISH = 8 + READ = 'read' + UPDATE = 'update' + DELETE = 'delete' + ADD_EXPERIMENT = 'add_experiment' + ADD_SCORE_SET = 'add_score_set' + SET_SCORES = 'set_scores' + ADD_ROLE = 'add_role' + PUBLISH = 'publish' class PermissionResponse: diff --git a/src/mavedb/routers/authorization.py b/src/mavedb/routers/permissions.py similarity index 75% rename from src/mavedb/routers/authorization.py rename to src/mavedb/routers/permissions.py index 3c5559d0..8d9a17b2 100644 --- a/src/mavedb/routers/authorization.py +++ b/src/mavedb/routers/permissions.py @@ -15,8 +15,8 @@ from mavedb.models.score_set import ScoreSet router = APIRouter( - prefix="/api/v1", - tags=["authorizations"], + prefix="/api/v1/permissions", + tags=["permissions"], responses={404: {"description": "Not found"}}, route_class=LoggedRoute, ) @@ -31,7 +31,7 @@ class ModelName(str, Enum): @router.get( - "/user-is-authorized/{model_name}/{urn}/{action}", + "/user-is-permitted/{model_name}/{urn}/{action}", status_code=200, response_model=bool ) @@ -39,7 +39,7 @@ async def check_authorization( *, model_name: str, urn: str, - action: str, + action: Action, db: Session = Depends(deps.get_db), user_data: UserData = Depends(get_current_user), ) -> bool: @@ -58,16 +58,8 @@ async def check_authorization( item = db.query(ScoreSet).filter(ScoreSet.urn == urn).one_or_none() if item: - if user_data: - try: - action_enum = Action[action.upper()] - permission = has_permission(user_data, item, action_enum).permitted - return permission - except KeyError: - raise HTTPException(status_code=400, detail=f"Invalid action: {action}") - else: - logger.debug(msg="Miss user data", extra=logging_context()) - raise HTTPException(status_code=404, detail=f"User not found") + permission = has_permission(user_data, item, action).permitted + return permission else: logger.debug(msg="The requested resources does not exist.", extra=logging_context()) raise HTTPException(status_code=404, detail=f"{model_name} with URN '{urn}' not 
found") diff --git a/src/mavedb/server_main.py b/src/mavedb/server_main.py index 597528f5..b8429d1d 100644 --- a/src/mavedb/server_main.py +++ b/src/mavedb/server_main.py @@ -26,7 +26,6 @@ from mavedb.lib.logging.canonical import log_request from mavedb.routers import ( access_keys, - authorization, api_information, controlled_keywords, doi_identifiers, @@ -37,6 +36,7 @@ log, mapped_variant, orcid, + permissions, publication_identifiers, target_gene_identifiers, taxonomies, @@ -74,7 +74,6 @@ ) app.include_router(access_keys.router) app.include_router(api_information.router) -app.include_router(authorization.router) app.include_router(controlled_keywords.router) app.include_router(doi_identifiers.router) app.include_router(experiment_sets.router) @@ -84,6 +83,7 @@ # app.include_router(log.router) app.include_router(mapped_variant.router) app.include_router(orcid.router) +app.include_router(permissions.router) app.include_router(publication_identifiers.router) app.include_router(raw_read_identifiers.router) app.include_router(score_sets.router) diff --git a/tests/routers/test_authorization.py b/tests/routers/test_permissions.py similarity index 70% rename from tests/routers/test_authorization.py rename to tests/routers/test_permissions.py index 52fd0893..669c3194 100644 --- a/tests/routers/test_authorization.py +++ b/tests/routers/test_permissions.py @@ -14,7 +14,7 @@ # Experiment set tests def test_get_true_authorization_from_own_experiment_set_add_experiment_check(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") assert response.status_code == 200 assert response.json() == True @@ -32,7 +32,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_set_add_expe TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") assert response.status_code == 200 assert response.json() == True @@ -43,7 +43,7 @@ def test_get_false_authorization_from_other_users_experiment_set_add_experiment_ change_ownership(session, experiment["urn"], ExperimentDbModel) change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) - response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") assert response.status_code == 200 assert response.json() == False @@ -51,15 +51,17 @@ def test_get_false_authorization_from_other_users_experiment_set_add_experiment_ def test_cannot_get_authorization_with_wrong_action_in_experiment_set(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment-set/{experiment['experimentSetUrn']}/edit") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/edit") - assert response.status_code == 400 + assert response.status_code == 422 response_data = response.json() - assert response_data["detail"] == "Invalid action: edit" + assert 
response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ + " 'add_role', 'publish'" def test_cannot_get_authorization_with_non_existing_experiment_set(client, setup_router_db): - response = client.get(f"/api/v1/user-is-authorized/experiment-set/invalidUrn/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -69,7 +71,7 @@ def test_cannot_get_authorization_with_non_existing_experiment_set(client, setup # Experiment tests def test_get_true_authorization_from_own_experiment_update_check(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 assert response.json() == True @@ -77,7 +79,7 @@ def test_get_true_authorization_from_own_experiment_update_check(client, setup_r def test_get_true_authorization_from_own_experiment_delete_check(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 assert response.json() == True @@ -85,7 +87,7 @@ def test_get_true_authorization_from_own_experiment_delete_check(client, setup_r def test_get_true_authorization_from_own_experiment_add_score_set_check(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 assert response.json() == True @@ -102,7 +104,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_update_check TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 assert response.json() == True @@ -119,7 +121,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_delete_check TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 assert response.json() == True @@ -136,7 +138,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_add_score_se TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 assert response.json() == True @@ -146,7 +148,7 @@ def test_get_false_authorization_from_other_users_experiment_add_score_set_check experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) - response = 
client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/add_score_set") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 assert response.json() == False @@ -156,7 +158,7 @@ def test_get_false_authorization_from_other_users_experiment_update_check(sessio experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 assert response.json() == False @@ -166,7 +168,7 @@ def test_get_false_authorization_from_other_users_experiment_delete_check(sessio experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 assert response.json() == False @@ -174,15 +176,17 @@ def test_get_false_authorization_from_other_users_experiment_delete_check(sessio def test_cannot_get_authorization_with_wrong_action_in_experiment(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/user-is-authorized/experiment/{experiment['urn']}/invalidAction") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/invalidAction") - assert response.status_code == 400 + assert response.status_code == 422 response_data = response.json() - assert response_data["detail"] == "Invalid action: invalidAction" + assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ + " 'add_role', 'publish'" def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): - response = client.get(f"/api/v1/user-is-authorized/experiment/invalidUrn/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -193,7 +197,7 @@ def test_cannot_get_authorization_with_non_existing_experiment(client, setup_rou def test_get_true_authorization_from_own_score_set_update_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 assert response.json() == True @@ -202,7 +206,7 @@ def test_get_true_authorization_from_own_score_set_update_check(client, setup_ro def test_get_true_authorization_from_own_score_set_delete_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 assert response.json() == True @@ -211,7 +215,7 @@ def 
test_get_true_authorization_from_own_score_set_delete_check(client, setup_ro def test_get_true_authorization_from_own_score_set_publish_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 assert response.json() == True @@ -229,7 +233,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_update_check( TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 assert response.json() == True @@ -247,7 +251,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_delete_check( TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 assert response.json() == True @@ -265,7 +269,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_publish_check TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 assert response.json() == True @@ -276,7 +280,7 @@ def test_get_false_authorization_from_other_users_score_set_delete_check(session score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/delete") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 assert response.json() == False @@ -287,7 +291,7 @@ def test_get_false_authorization_from_other_users_score_set_update_check(session score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 assert response.json() == False @@ -298,7 +302,7 @@ def test_get_false_authorization_from_other_users_score_set_publish_check(sessio score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) - response = client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/publish") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 assert response.json() == False @@ -307,15 +311,17 @@ def test_get_false_authorization_from_other_users_score_set_publish_check(sessio def test_cannot_get_authorization_with_wrong_action_in_score_set(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) - response = 
client.get(f"/api/v1/user-is-authorized/score-set/{score_set['urn']}/invalidAction") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/invalidAction") - assert response.status_code == 400 + assert response.status_code == 422 response_data = response.json() - assert response_data["detail"] == "Invalid action: invalidAction" + assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ + " 'add_role', 'publish'" def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): - response = client.get(f"/api/v1/user-is-authorized/score-set/invalidUrn/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -324,7 +330,7 @@ def test_cannot_get_authorization_with_non_existing_experiment(client, setup_rou # Common invalid test def test_cannot_get_authorization_with_non_existing_item(client, setup_router_db): - response = client.get(f"/api/v1/user-is-authorized/invalidModel/invalidUrn/update") + response = client.get(f"/api/v1/permissions/user-is-permitted/invalidModel/invalidUrn/update") assert response.status_code == 404 response_data = response.json() From 44e12ae8232b7715f706de155d9a8a3f5b04e33d Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Wed, 9 Oct 2024 11:47:52 +1100 Subject: [PATCH 27/58] Find a bug in has_permission function. A test can't pass due to it. --- tests/routers/test_permissions.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/routers/test_permissions.py b/tests/routers/test_permissions.py index 669c3194..2ab55934 100644 --- a/tests/routers/test_permissions.py +++ b/tests/routers/test_permissions.py @@ -144,14 +144,15 @@ def test_contributor_gets_true_authorization_from_others_experiment_add_score_se assert response.json() == True -def test_get_false_authorization_from_other_users_experiment_add_score_set_check(session, client, setup_router_db): +# TODO: This one has problem. Need to fix has_permission function from lib/permissions.py first +def test_get_true_authorization_from_other_users_experiment_add_score_set_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 - assert response.json() == False + assert response.json() == True def test_get_false_authorization_from_other_users_experiment_update_check(session, client, setup_router_db): From a7787afd95101b7e5ae1c24bf38cee086b3cad50 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Wed, 9 Oct 2024 10:32:37 -0700 Subject: [PATCH 28/58] Rework Dependencies for Minimal Non-Server Configuration Reworks core dependency bloat so that view models are importable via the base `mavedb` module. Packages `biocommons`, `hgvs`, `starlette`, `starlette-context`, and `cdot` have been refactored into the server optional dependency group. To complete this change: - The default logging configuration now excludes `watchtower`, which is a server dependency. Note that servers running this package should use `LOG_CONFIG=server` to retain `watchtower` functionality. 
- The method for resolving pydantic forward references has been updated to
  make these references resolvable via direct import rather than module
  level imports. Circular model dependencies should be defined in both
  files, and then both models should have their forward references updated.
  This would have raised an exception during OpenAPI schema generation if
  models were not resolved prior, so view models are now imported and have
  their references resolved on import of the router module.
- The `pyyaml` and `email_validator` packages were added to our core
  dependency list, as pydantic validation of certain models depends on
  their presence.
---
 pyproject.toml                                | 15 +++---
 src/mavedb/logging/__init__.py                | 22 +++++---
 .../logging/configurations/default.yaml       | 19 +------
 src/mavedb/logging/configurations/server.yaml | 53 +++++++++++++++++++
 src/mavedb/routers/__init__.py                |  2 +
 src/mavedb/view_models/score_set.py           | 27 ++++++++--
 6 files changed, 101 insertions(+), 37 deletions(-)
 create mode 100644 src/mavedb/logging/configurations/server.yaml

diff --git a/pyproject.toml b/pyproject.toml
index 66a4d921..30597272 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -26,12 +26,11 @@ python = "^3.9"
 
 fqfa = "~1.3.0"
 pyhumps = "~3.8.0"
+pyyaml = "~5.1"
 IDUtils = "~1.2.0"
 mavehgvs = "~0.6.0"
 eutils = "~0.6.0"
-hgvs = "~1.5.4"
-biocommons = "~0.0.0"
-cdot = "~0.2.21"
+email_validator = "~2.1.1"
 numpy = "~1.26"
 httpx = "~0.26.0"
 pandas = "~1.4.1"
@@ -39,22 +38,24 @@ pydantic = "~1.10"
 python-dotenv = "~0.20.0"
 python-json-logger = "~2.0.7"
 SQLAlchemy = "~2.0.0"
-starlette = "~0.27.0"
-starlette-context = "^0.3.6"
 
 # Optional dependencies for running this application as a server
 alembic = { version = "~1.7.6", optional = true }
 arq = { version = "~0.25.0", optional = true }
 authlib = { version = "~1.3.1", optional = true }
 boto3 = { version = "~1.34.97", optional = true }
+biocommons = { version = "~0.0.0", optional = true }
 cryptography = { version = "~43.0.1", optional = true }
-email-validator = { version = "~2.1.1", optional = true }
+cdot = { version = "~0.2.21", optional = true }
 fastapi = { version = "~0.95.0", optional = true }
+hgvs = { version = "~1.5.4", optional = true }
 orcid = { version = "~1.0.3", optional = true }
 psycopg2 = { version = "~2.9.3", optional = true }
 python-jose = { extras = ["cryptography"], version = "~3.3.0", optional = true }
 python-multipart = { version = "~0.0.5", optional = true }
 requests = { version = "~2.32.0", optional = true }
+starlette = { version = "~0.27.0", optional = true }
+starlette-context = { version = "^0.3.6", optional = true }
 slack-sdk = { version = "~3.21.3", optional = true }
 uvicorn = { extras = ["standard"], version = "*", optional = true }
 watchtower = { version = "~3.2.0", optional = true }
@@ -84,7 +85,7 @@ SQLAlchemy = { extras = ["mypy"], version = "~2.0.0" }
 
 
 [tool.poetry.extras]
-server = ["alembic", "arq", "authlib", "boto3", "cryptography", "fastapi", "email-validator", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "slack-sdk", "uvicorn", "watchtower"]
+server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptography", "fastapi", "hgvs", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "starlette", "starlette-context", "slack-sdk", "uvicorn", "watchtower"]
 
 
 [tool.black]
diff --git a/src/mavedb/logging/__init__.py b/src/mavedb/logging/__init__.py
index 51e0ab2c..a8aa759d 100644
--- a/src/mavedb/logging/__init__.py
+++ b/src/mavedb/logging/__init__.py
@@ -3,12 +3,18 @@
 import os
 import sys
 
-from watchtower import CloudWatchLogHandler
-
 from .config import load_stock_config
 from .filters import canonical_only
 from .formatters import MavedbJsonFormatter
 
+WATCHTOWER_IMPORTED = False
+try:
+    from watchtower import CloudWatchLogHandler
+
+    WATCHTOWER_IMPORTED = True
+except ModuleNotFoundError:
+    pass
+
 LOG_CONFIG = os.environ.get("LOG_CONFIG")
 
 
@@ -35,12 +41,14 @@ def configure():
     # Formatter and handler are un-configurable via file config.
     cw_is_enabled = False
     root_logger = logging.getLogger("root")
-    for handler in root_logger.handlers:
-        if isinstance(handler, CloudWatchLogHandler):
-            handler.addFilter(canonical_only)
-            handler.formatter = MavedbJsonFormatter()
-
-            cw_is_enabled = True
+    if WATCHTOWER_IMPORTED:
+        for handler in root_logger.handlers:
+            if isinstance(handler, CloudWatchLogHandler):
+                handler.addFilter(canonical_only)
+                handler.formatter = MavedbJsonFormatter()
+
+                cw_is_enabled = True
 
     if not cw_is_enabled:
         root_logger.info("CloudWatch log handler is not enabled. Canonical logs will only be emitted to stdout.")
diff --git a/src/mavedb/logging/configurations/default.yaml b/src/mavedb/logging/configurations/default.yaml
index b5c622d7..a685f66c 100644
--- a/src/mavedb/logging/configurations/default.yaml
+++ b/src/mavedb/logging/configurations/default.yaml
@@ -9,7 +9,6 @@ root:
   propagate: true
   handlers:
     - console
-    - watchtower
 
 loggers:
   urllib3:
@@ -18,10 +17,6 @@ loggers:
   botocore:
     level: INFO
 
-  # HGVS emits a log line on startup, silence it.
-  hgvs:
-    level: WARNING
-
   # To log all database queries, set this to INFO
   sqlalchemy:
     level: WARNING
@@ -35,19 +30,7 @@ handlers:
     stream: ext://sys.stdout
     formatter: json
 
-  watchtower:
-    class: watchtower.CloudWatchLogHandler
-    level: !coalesce
-      - !LOG_LEVEL
-      - INFO
-    log_group_name: !CLOUD_WATCH_LOG_GROUP
-    log_stream_name: "{machine_name}/{logger_name}/{strftime:%y-%m-%d}"
-    send_interval: 10
-    use_queues: !CLOUD_WATCH_USE_QUEUES
-    create_log_group: True
-    formatter: json
-
 formatters:
   json:
-    class: mavedb.logging.MavedbJsonFormatter
+    class: pythonjsonlogger.jsonlogger.JsonFormatter
     format: "%(message)s"
diff --git a/src/mavedb/logging/configurations/server.yaml b/src/mavedb/logging/configurations/server.yaml
new file mode 100644
index 00000000..b5c622d7
--- /dev/null
+++ b/src/mavedb/logging/configurations/server.yaml
@@ -0,0 +1,53 @@
+version: 1
+disable_existing_loggers: False
+
+root:
+  # Filtering of messages by level is done at the handler level by using NOTSET
+  # on the root logger to emit everything. This lets us keep console output
+  # readable while emitting verbose output to alternate handlers.
+  level: NOTSET
+  propagate: true
+  handlers:
+    - console
+    - watchtower
+
+loggers:
+  urllib3:
+    level: INFO
+
+  botocore:
+    level: INFO
+
+  # HGVS emits a log line on startup, silence it.
+  hgvs:
+    level: WARNING
+
+  # To log all database queries, set this to INFO
+  sqlalchemy:
+    level: WARNING
+
+handlers:
+  console:
+    class: logging.StreamHandler
+    level: !coalesce
+      - !LOG_LEVEL
+      - INFO
+    stream: ext://sys.stdout
+    formatter: json
+
+  watchtower:
+    class: watchtower.CloudWatchLogHandler
+    level: !coalesce
+      - !LOG_LEVEL
+      - INFO
+    log_group_name: !CLOUD_WATCH_LOG_GROUP
+    log_stream_name: "{machine_name}/{logger_name}/{strftime:%y-%m-%d}"
+    send_interval: 10
+    use_queues: !CLOUD_WATCH_USE_QUEUES
+    create_log_group: True
+    formatter: json
+
+formatters:
+  json:
+    class: mavedb.logging.MavedbJsonFormatter
+    format: "%(message)s"
diff --git a/src/mavedb/routers/__init__.py b/src/mavedb/routers/__init__.py
index e69de29b..cb7e81af 100644
--- a/src/mavedb/routers/__init__.py
+++ b/src/mavedb/routers/__init__.py
@@ -0,0 +1,2 @@
+# Import view models up front so any deferred forward references are resolved prior to OpenAPI spec generation.
+import mavedb.view_models
diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index b6cc9064..463431c6 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -20,7 +20,6 @@
     DoiIdentifierCreate,
     SavedDoiIdentifier,
 )
-from mavedb.view_models.experiment import Experiment, SavedExperiment
 from mavedb.view_models.license import ShortLicense
 from mavedb.view_models.publication_identifier import (
     PublicationIdentifier,
@@ -92,7 +91,13 @@ def targets_need_labels_when_multiple_targets_exist(cls, field_value, values):
             if target.target_sequence and target.target_sequence.label is None:
                 raise ValidationError(
                     "Target sequence labels cannot be empty when multiple targets are defined.",
-                    custom_loc=["body", "targetGene", idx, "targetSequence", "label"],
+                    custom_loc=[
+                        "body",
+                        "targetGene",
+                        idx,
+                        "targetSequence",
+                        "label",
+                    ],
                 )
 
         return field_value
@@ -111,7 +116,13 @@ def target_labels_are_unique(cls, field_value, values):
             # just one for now seems fine.
             raise ValidationError(
                 "Target sequence labels cannot be duplicated.",
-                custom_loc=["body", "targetGene", dup_indices[-1], "targetSequence", "label"],
+                custom_loc=[
+                    "body",
+                    "targetGene",
+                    dup_indices[-1],
+                    "targetSequence",
+                    "label",
+                ],
             )
 
         return field_value
@@ -193,7 +204,7 @@ class ShortScoreSet(BaseModel):
     published_date: Optional[date]
     replaces_id: Optional[int]
     num_variants: int
-    experiment: Experiment
+    experiment: "Experiment"
    primary_publication_identifiers: list[SavedPublicationIdentifier]
     secondary_publication_identifiers: list[SavedPublicationIdentifier]
     license: ShortLicense
@@ -260,7 +271,7 @@ def camelize_dataset_columns_keys(cls, value) -> dict:
 class ScoreSet(SavedScoreSet):
     """Score set view model containing most properties visible to non-admin users, but no variant data."""
 
-    experiment: Experiment
+    experiment: "Experiment"
     doi_identifiers: Sequence[DoiIdentifier]
     primary_publication_identifiers: Sequence[PublicationIdentifier]
     secondary_publication_identifiers: Sequence[PublicationIdentifier]
@@ -304,3 +315,9 @@ class ScoreSetPublicDump(SavedScoreSet):
     processing_errors: Optional[Dict]
     mapping_state: Optional[MappingState]
     mapping_errors: Optional[Dict]
+
+
+from mavedb.view_models.experiment import Experiment
+
+ShorterScoreSet.update_forward_refs()
+ScoreSet.update_forward_refs()
From e07c4855d77abe77820e20ced48597c8b4c149cb Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Wed, 9 Oct 2024 10:39:32 -0700
Subject: [PATCH 29/58] Lock Dependencies in Updated State

---
 poetry.lock | 612 +++++++++++++++++++++++++---------------------------
 1 file changed, 294 insertions(+), 318 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index a481313c..c8a5fab0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -63,7 +63,7 @@ watch = ["watchfiles (>=0.16)"]
 name = "asttokens"
 version = "2.4.1"
 description = "Annotate AST trees with source code positions"
-optional = false
+optional = true
 python-versions = "*"
 files = [
     {file = "asttokens-2.4.1-py2.py3-none-any.whl", hash = "sha256:051ed49c3dcae8913ea7cd08e46a606dba30b79993209636c4875bc1d637bc24"},
@@ -146,7 +146,7 @@ lxml = ["lxml"]
 name = "biocommons"
 version = "0.0.0"
 description = "ensures that the biocommons namespace is correctly declared"
-optional = false
+optional = true
 python-versions = "*"
 files = [
     {file = "biocommons-0.0.0-py3-none-any.whl", hash = "sha256:2cfa1cd2d97bdd42ea0ea6ec143021b79ad5a94a92303d492dc2eb505026d63a"},
@@ -157,7 +157,7 @@
 name = "biocommons-seqrepo"
 version = "0.6.9"
 description = "Non-redundant, compressed, journalled, file-based storage for biological sequences"
-optional = false
+optional = true
 python-versions = ">=3.9"
 files = [
     {file = "biocommons.seqrepo-0.6.9-py3-none-any.whl", hash = "sha256:f16c9131fc08aa06ed50c385e3a0ffefc092ea39393571a0b44d2f2349d5f495"},
@@ -182,7 +182,7 @@ docs = ["mkdocs"]
 name = "bioutils"
 version = "0.5.7"
 description = "miscellaneous simple bioinformatics utilities and lookup tables"
-optional = false
+optional = true
 python-versions = ">=3.6"
 files = [
     {file = "bioutils-0.5.7-py2.py3-none-any.whl", hash = "sha256:b9e994626d234ddef65c15b76d405a35255912c66cfe71e39665859154cbfdb6"},
@@ -202,7 +202,7 @@ test = ["black", "flake8", "isort", "pytest", "pytest-cov", "pytest-optional-tes
 name = "bioutils"
 version = "0.5.8.post1"
 description = "miscellaneous simple bioinformatics utilities and lookup tables"
-optional = false
+optional = true
 python-versions = ">=3.8"
 files = [
     {file = "bioutils-0.5.8.post1-py3-none-any.whl", hash =
"sha256:f58de493260042bff78aef484a3caf84e40987b663075f8573022df6f4c2a2ac"}, @@ -220,33 +220,33 @@ test = ["pytest (>=7.1,<8.0)", "pytest-cov (>=4.0,<5.0)", "pytest-optional-tests [[package]] name = "black" -version = "24.8.0" +version = "24.10.0" description = "The uncompromising code formatter." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "black-24.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:09cdeb74d494ec023ded657f7092ba518e8cf78fa8386155e4a03fdcc44679e6"}, - {file = "black-24.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:81c6742da39f33b08e791da38410f32e27d632260e599df7245cccee2064afeb"}, - {file = "black-24.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:707a1ca89221bc8a1a64fb5e15ef39cd755633daa672a9db7498d1c19de66a42"}, - {file = "black-24.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d6417535d99c37cee4091a2f24eb2b6d5ec42b144d50f1f2e436d9fe1916fe1a"}, - {file = "black-24.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fb6e2c0b86bbd43dee042e48059c9ad7830abd5c94b0bc518c0eeec57c3eddc1"}, - {file = "black-24.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:837fd281f1908d0076844bc2b801ad2d369c78c45cf800cad7b61686051041af"}, - {file = "black-24.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62e8730977f0b77998029da7971fa896ceefa2c4c4933fcd593fa599ecbf97a4"}, - {file = "black-24.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:72901b4913cbac8972ad911dc4098d5753704d1f3c56e44ae8dce99eecb0e3af"}, - {file = "black-24.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:7c046c1d1eeb7aea9335da62472481d3bbf3fd986e093cffd35f4385c94ae368"}, - {file = "black-24.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:649f6d84ccbae73ab767e206772cc2d7a393a001070a4c814a546afd0d423aed"}, - {file = "black-24.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2b59b250fdba5f9a9cd9d0ece6e6d993d91ce877d121d161e4698af3eb9c1018"}, - {file = "black-24.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:6e55d30d44bed36593c3163b9bc63bf58b3b30e4611e4d88a0c3c239930ed5b2"}, - {file = "black-24.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:505289f17ceda596658ae81b61ebbe2d9b25aa78067035184ed0a9d855d18afd"}, - {file = "black-24.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b19c9ad992c7883ad84c9b22aaa73562a16b819c1d8db7a1a1a49fb7ec13c7d2"}, - {file = "black-24.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1f13f7f386f86f8121d76599114bb8c17b69d962137fc70efe56137727c7047e"}, - {file = "black-24.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:f490dbd59680d809ca31efdae20e634f3fae27fba3ce0ba3208333b713bc3920"}, - {file = "black-24.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eab4dd44ce80dea27dc69db40dab62d4ca96112f87996bca68cd75639aeb2e4c"}, - {file = "black-24.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3c4285573d4897a7610054af5a890bde7c65cb466040c5f0c8b732812d7f0e5e"}, - {file = "black-24.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9e84e33b37be070ba135176c123ae52a51f82306def9f7d063ee302ecab2cf47"}, - {file = "black-24.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:73bbf84ed136e45d451a260c6b73ed674652f90a2b3211d6a35e78054563a9bb"}, - {file = "black-24.8.0-py3-none-any.whl", hash = "sha256:972085c618ee94f402da1af548a4f218c754ea7e5dc70acb168bfaca4c2542ed"}, - 
{file = "black-24.8.0.tar.gz", hash = "sha256:2500945420b6784c38b9ee885af039f5e7471ef284ab03fa35ecdde4688cd83f"}, + {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, + {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, + {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, + {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, + {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, + {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, + {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, + {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, + {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, + {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, + {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, + {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, + {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, + {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, + {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, + {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, + {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, + {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, + {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, + {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, + {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, + {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, ] [package.dependencies] @@ -260,7 +260,7 @@ typing-extensions = {version = ">=4.0.1", markers = "python_version < 
\"3.11\""} [package.extras] colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.7.4)", "aiohttp (>=3.7.4,!=3.9.0)"] +d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] @@ -714,13 +714,13 @@ crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" -version = "1.35.29" +version = "1.35.36" description = "Type annotations and code completion for botocore" optional = false python-versions = ">=3.8" files = [ - {file = "botocore_stubs-1.35.29-py3-none-any.whl", hash = "sha256:8eff9dc4e6e844baf65beb16eb2c68a173ccd50dc9323dc04d85060cacc36a05"}, - {file = "botocore_stubs-1.35.29.tar.gz", hash = "sha256:40d4cf5fc527fbad381be18cf837400d6f168a880e26ee794c8c04fa0a3e62c5"}, + {file = "botocore_stubs-1.35.36-py3-none-any.whl", hash = "sha256:bb02ed31bcaf6a239494d747a474e47e84fd2ff5ca226b9e6e77f604ba67e8b5"}, + {file = "botocore_stubs-1.35.36.tar.gz", hash = "sha256:0ac480692328879fd260b51c92e326428d26d60be6fb06bb606fcc2add9b162d"}, ] [package.dependencies] @@ -733,7 +733,7 @@ botocore = ["botocore"] name = "cdot" version = "0.2.21" description = "Transcripts for HGVS libraries" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "cdot-0.2.21-py3-none-any.whl", hash = "sha256:092241cc1a2ac43f288668aabcf9802421458322fe2a2a531fb6e22976305b57"}, @@ -750,7 +750,7 @@ requests = "*" name = "cdot" version = "0.2.26" description = "Transcripts for HGVS libraries" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "cdot-0.2.26-py3-none-any.whl", hash = "sha256:f9f6c3dbdb9dffda3779e77d9acef33ae3111c11a4de18fba5ff1d77cbc83c00"}, @@ -866,101 +866,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -992,7 +1007,7 @@ files = [ name = "coloredlogs" version = "15.0.1" description = "Colored terminal output for Python's logging module" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, @@ -1009,7 +1024,7 @@ cron = ["capturer (>=2.4)"] name = "configparser" version = "7.1.0" description = "Updated configparser from stdlib for earlier Pythons." -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "configparser-7.1.0-py3-none-any.whl", hash = "sha256:98e374573c4e10e92399651e3ba1c47a438526d633c44ee96143dec26dad4299"}, @@ -1073,7 +1088,7 @@ test-randomorder = ["pytest-randomly"] name = "decorator" version = "5.1.1" description = "Decorators for Humans" -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, @@ -1093,21 +1108,21 @@ files = [ [[package]] name = "dnspython" -version = "2.6.1" +version = "2.7.0" description = "DNS toolkit" -optional = true -python-versions = ">=3.8" +optional = false +python-versions = ">=3.9" files = [ - {file = "dnspython-2.6.1-py3-none-any.whl", hash = "sha256:5ef3b9680161f6fa89daf8ad451b5f1a33b18ae8a1c6778cdf4b43f08c0a6e50"}, - {file = "dnspython-2.6.1.tar.gz", hash = "sha256:e8f0f9c23a7b7cb99ded64e6c3a6f3e701d78f50c55e002b839dea7225cff7cc"}, + {file = "dnspython-2.7.0-py3-none-any.whl", hash = "sha256:b4c34b7d10b51bcc3a5071e7b8dee77939f1e878477eeecc965e9835f63c6c86"}, + {file = "dnspython-2.7.0.tar.gz", hash = "sha256:ce9c432eda0dc91cf618a5cedf1a4e142651196bbcd2c80e89ed5a907e5cfaf1"}, ] [package.extras] -dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "sphinx (>=7.2.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] -dnssec = ["cryptography (>=41)"] +dev = ["black (>=23.1.0)", "coverage (>=7.0)", "flake8 (>=7)", "hypercorn (>=0.16.0)", "mypy (>=1.8)", "pylint (>=3)", "pytest (>=7.4)", "pytest-cov (>=4.1.0)", "quart-trio (>=0.11.0)", "sphinx (>=7.2.0)", "sphinx-rtd-theme (>=2.0.0)", "twine (>=4.0.0)", "wheel (>=0.42.0)"] +dnssec = ["cryptography (>=43)"] doh = ["h2 (>=4.1.0)", "httpcore (>=1.0.0)", "httpx (>=0.26.0)"] -doq = ["aioquic (>=0.9.25)"] -idna = ["idna (>=3.6)"] +doq = ["aioquic (>=1.0.0)"] +idna = ["idna (>=3.7)"] trio = ["trio (>=0.23)"] wmi = ["wmi (>=1.5.1)"] @@ -1133,7 +1148,7 @@ gmpy2 = ["gmpy2"] name = "email-validator" version = "2.1.2" description = "A robust email address syntax and deliverability validation library." 
-optional = true +optional = false python-versions = ">=3.8" files = [ {file = "email_validator-2.1.2-py3-none-any.whl", hash = "sha256:d89f6324e13b1e39889eab7f9ca2f91dc9aebb6fa50a6d8bd4329ab50f251115"}, @@ -1181,7 +1196,7 @@ test = ["pytest (>=6)"] name = "executing" version = "2.1.0" description = "Get the currently executing AST node of a frame, and other information" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, @@ -1381,7 +1396,7 @@ files = [ name = "hgvs" version = "1.5.4" description = "HGVS Parser, Formatter, Mapper, Validator" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "hgvs-1.5.4-py2.py3-none-any.whl", hash = "sha256:598640bae0de34ff29c58440904fc9156d7a1bc750ddef5894edd415c772b957"}, @@ -1527,13 +1542,13 @@ lxml = ["lxml"] [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.6" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -1544,7 +1559,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httptools" @@ -1622,7 +1637,7 @@ socks = ["socksio (==1.*)"] name = "humanfriendly" version = "10.0" description = "Human friendly output for text interfaces using Python" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, @@ -1682,7 +1697,7 @@ tests = ["pytest-black (>=0.3.0,<0.3.10)", "pytest-cache (>=1.0)", "pytest-inven name = "importlib-metadata" version = "8.5.0" description = "Read metadata from Python packages" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, @@ -1716,7 +1731,7 @@ files = [ name = "intervaltree" version = "3.1.0" description = "Editable interval tree data structure for Python 2 and 3" -optional = false +optional = true python-versions = "*" files = [ {file = "intervaltree-3.1.0.tar.gz", hash = "sha256:902b1b88936918f9b2a19e0e5eb7ccb430ae45cde4f39ea4b36932920d33952d"}, @@ -1729,7 +1744,7 @@ sortedcontainers = ">=2.0,<3.0" name = "ipython" version = "8.18.1" description = "IPython: Productive Interactive Computing" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"}, @@ -1777,7 +1792,7 @@ files = [ name = "jedi" version = "0.19.1" description = "An autocompletion tool for Python that can be used for text editors." 
-optional = false +optional = true python-versions = ">=3.6" files = [ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, @@ -1826,13 +1841,13 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- [[package]] name = "jsonschema-specifications" -version = "2023.12.1" +version = "2024.10.1" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "jsonschema_specifications-2023.12.1-py3-none-any.whl", hash = "sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c"}, - {file = "jsonschema_specifications-2023.12.1.tar.gz", hash = "sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc"}, + {file = "jsonschema_specifications-2024.10.1-py3-none-any.whl", hash = "sha256:a09a0680616357d9a0ecf05c12ad234479f549239d0f5b55f3deea67475da9bf"}, + {file = "jsonschema_specifications-2024.10.1.tar.gz", hash = "sha256:0f38b83639958ce1152d02a7f062902c41c8fd20d558b0c34344292d417ae272"}, ] [package.dependencies] @@ -1842,7 +1857,7 @@ referencing = ">=0.31.0" name = "lazy" version = "1.6" description = "Lazy attributes for Python objects" -optional = false +optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" files = [ {file = "lazy-1.6-py2.py3-none-any.whl", hash = "sha256:449375c125c7acac6b7a93f71b8e7ccb06546c37b161613f92d2d3981f793244"}, @@ -2028,78 +2043,79 @@ testing = ["pytest"] [[package]] name = "markupsafe" -version = "2.1.5" +version = "3.0.1" description = "Safely add untrusted strings to HTML/XML markup." optional = true -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, - {file = 
"MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win32.whl", hash = "sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906"}, - {file = "MarkupSafe-2.1.5-cp311-cp311-win_amd64.whl", hash = "sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win32.whl", hash = "sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad"}, - {file = "MarkupSafe-2.1.5-cp312-cp312-win_amd64.whl", hash = "sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a"}, - {file = 
"MarkupSafe-2.1.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win32.whl", hash = "sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371"}, - {file = "MarkupSafe-2.1.5-cp37-cp37m-win_amd64.whl", hash = "sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win32.whl", hash = "sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff"}, - {file = "MarkupSafe-2.1.5-cp38-cp38-win_amd64.whl", hash = "sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_i686.whl", hash = 
"sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win32.whl", hash = "sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf"}, - {file = "MarkupSafe-2.1.5-cp39-cp39-win_amd64.whl", hash = "sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5"}, - {file = "MarkupSafe-2.1.5.tar.gz", hash = "sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = 
"sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, + {file = 
"MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, + {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, ] [[package]] name = "matplotlib-inline" version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, @@ -2355,7 +2371,7 @@ types-pytz = ">=2022.1.1" name = "parsley" version = "1.3" description = "Parsing and pattern matching made easy." 
-optional = false +optional = true python-versions = "*" files = [ {file = "Parsley-1.3-py2.py3-none-any.whl", hash = "sha256:c3bc417b8c7e3a96c87c0f2f751bfd784ed5156ffccebe2f84330df5685f8dc3"}, @@ -2366,7 +2382,7 @@ files = [ name = "parso" version = "0.8.4" description = "A Python Parser" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, @@ -2392,7 +2408,7 @@ files = [ name = "pexpect" version = "4.9.0" description = "Pexpect allows easy control of interactive console applications." -optional = false +optional = true python-versions = "*" files = [ {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, @@ -2435,24 +2451,24 @@ testing = ["pytest", "pytest-benchmark"] [[package]] name = "port-for" -version = "0.7.3" +version = "0.7.4" description = "Utility that helps with local TCP ports management. It can find an unused TCP localhost port and remember the association." optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "port_for-0.7.3-py3-none-any.whl", hash = "sha256:786fa1171cee23093a475d65228b4a9877d249827ceb7cd2362cb7b80d0c69d4"}, - {file = "port_for-0.7.3.tar.gz", hash = "sha256:2d597e5854a1b323b17eba8ae0630784c779857abde5e22444c88d233a60f953"}, + {file = "port_for-0.7.4-py3-none-any.whl", hash = "sha256:08404aa072651a53dcefe8d7a598ee8a1dca320d9ac44ac464da16ccf2a02c4a"}, + {file = "port_for-0.7.4.tar.gz", hash = "sha256:fc7713e7b22f89442f335ce12536653656e8f35146739eccaeff43d28436028d"}, ] [[package]] name = "pre-commit" -version = "3.8.0" +version = "4.0.1" description = "A framework for managing and maintaining multi-language pre-commit hooks." 
optional = false python-versions = ">=3.9" files = [ - {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, - {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, + {file = "pre_commit-4.0.1-py2.py3-none-any.whl", hash = "sha256:efde913840816312445dc98787724647c65473daefe420785f885e8ed9a06878"}, + {file = "pre_commit-4.0.1.tar.gz", hash = "sha256:80905ac375958c0444c65e9cebebd948b3cdb518f335a091a670a89d652139d2"}, ] [package.dependencies] @@ -2466,7 +2482,7 @@ virtualenv = ">=20.10.0" name = "prompt-toolkit" version = "3.0.48" description = "Library for building powerful interactive command lines in Python" -optional = false +optional = true python-versions = ">=3.7.0" files = [ {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"}, @@ -2532,7 +2548,7 @@ test = ["anyio (>=4.0)", "mypy (>=1.11)", "pproxy (>=2.7)", "pytest (>=6.2.5)", name = "psycopg2" version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, @@ -2554,7 +2570,7 @@ files = [ name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -optional = false +optional = true python-versions = "*" files = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, @@ -2565,7 +2581,7 @@ files = [ name = "pure-eval" version = "0.2.3" description = "Safely evaluate AST nodes without side effects" -optional = false +optional = true python-versions = "*" files = [ {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, @@ -2682,7 +2698,7 @@ files = [ name = "pygments" version = "2.18.0" description = "Pygments is a syntax highlighting package written in Python." -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, @@ -2707,7 +2723,7 @@ files = [ name = "pyreadline3" version = "3.5.4" description = "A python implementation of GNU readline." 
-optional = false +optional = true python-versions = ">=3.8" files = [ {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, @@ -2721,7 +2737,7 @@ dev = ["build", "flake8", "mypy", "pytest", "twine"] name = "pysam" version = "0.22.1" description = "Package for reading, manipulating, and writing genomic data" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "pysam-0.22.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f18e72013ef2db9a9bb7e8ac421934d054427f6c03e66ce8abc39b09c846ba72"}, @@ -2911,64 +2927,24 @@ files = [ [[package]] name = "pyyaml" -version = "6.0.2" +version = "5.1.2" description = "YAML parser and emitter for Python" optional = false -python-versions = ">=3.8" +python-versions = "*" files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, - {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, - {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, - {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, - {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, - {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, - {file = 
"PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, - {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, - {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, - {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, - {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, - {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, - {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, - {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, - {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, - {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, - {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, - {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, - {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, - {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, - {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = 
"sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, - {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, - {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, - {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, - {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, - {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, - {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, - {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, + {file = "PyYAML-5.1.2-cp27-cp27m-win32.whl", hash = "sha256:5124373960b0b3f4aa7df1707e63e9f109b5263eca5976c66e08b1c552d4eaf8"}, + {file = "PyYAML-5.1.2-cp27-cp27m-win_amd64.whl", hash = "sha256:f81025eddd0327c7d4cfe9b62cf33190e1e736cc6e97502b3ec425f574b3e7a8"}, + {file = "PyYAML-5.1.2-cp34-cp34m-win32.whl", hash = "sha256:0113bc0ec2ad727182326b61326afa3d1d8280ae1122493553fd6f4397f33df9"}, + {file = "PyYAML-5.1.2-cp34-cp34m-win_amd64.whl", hash = "sha256:5ca4f10adbddae56d824b2c09668e91219bb178a1eee1faa56af6f99f11bf696"}, + {file = "PyYAML-5.1.2-cp35-cp35m-win32.whl", hash = "sha256:bf47c0607522fdbca6c9e817a6e81b08491de50f3766a7a0e6a5be7905961b41"}, + {file = "PyYAML-5.1.2-cp35-cp35m-win_amd64.whl", hash = "sha256:87ae4c829bb25b9fe99cf71fbb2140c448f534e24c998cc60f39ae4f94396a73"}, + {file = "PyYAML-5.1.2-cp36-cp36m-win32.whl", hash = "sha256:9de9919becc9cc2ff03637872a440195ac4241c80536632fffeb6a1e25a74299"}, + {file = "PyYAML-5.1.2-cp36-cp36m-win_amd64.whl", hash = "sha256:a5a85b10e450c66b49f98846937e8cfca1db3127a9d5d1e31ca45c3d0bef4c5b"}, + {file = "PyYAML-5.1.2-cp37-cp37m-win32.whl", hash = "sha256:b0997827b4f6a7c286c01c5f60384d218dca4ed7d9efa945c3e1aa623d5709ae"}, + {file = "PyYAML-5.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:7907be34ffa3c5a32b60b95f4d95ea25361c951383a894fec31be7252b2b6f34"}, + {file = "PyYAML-5.1.2-cp38-cp38m-win32.whl", hash = "sha256:7ec9b2a4ed5cad025c2278a1e6a19c011c80a3caaac804fd2d329e9cc2c287c9"}, + {file = "PyYAML-5.1.2-cp38-cp38m-win_amd64.whl", hash = "sha256:b631ef96d3222e62861443cc89d6563ba3eeb816eeb96b2629345ab795e53681"}, + {file = "PyYAML-5.1.2.tar.gz", hash = "sha256:01adf0b6c6f61bd11af6e10ca52b7d4057dd0be0343eb9283c878cf3af56aee4"}, ] [[package]] @@ -3173,13 +3149,13 @@ pyasn1 = ">=0.1.3" [[package]] name = "s3transfer" -version = "0.10.2" +version = "0.10.3" description = "An Amazon S3 Transfer Manager" optional = true 
=3.8"">
python-versions = ">=3.8"
files = [
-    {file = "s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69"},
-    {file = "s3transfer-0.10.2.tar.gz", hash = "sha256:0711534e9356d3cc692fdde846b4a1e4b0cb6519971860796e6bc4c7aea00ef6"},
+    {file = "s3transfer-0.10.3-py3-none-any.whl", hash = "sha256:263ed587a5803c6c708d3ce44dc4dfedaab4c1a32e8329bab818933d79ddcf5d"},
+    {file = "s3transfer-0.10.3.tar.gz", hash = "sha256:4f50ed74ab84d474ce614475e0b8d5047ff080810aac5d01ea25231cfc944b0c"},
]

[package.dependencies]
@@ -3478,7 +3454,7 @@ sqlcipher = ["sqlcipher3_binary"]
name = "sqlparse"
version = "0.5.1"
description = "A non-validating SQL parser."
-optional = false
+optional = true
python-versions = ">=3.8"
files = [
    {file = "sqlparse-0.5.1-py3-none-any.whl", hash = "sha256:773dcbf9a5ab44a090f3441e2180efe2560220203dc2f8c0b0fa141e18b505e4"},
@@ -3493,7 +3469,7 @@ doc = ["sphinx"]
name = "stack-data"
version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
-optional = false
+optional = true
python-versions = "*"
files = [
    {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
@@ -3512,7 +3488,7 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
name = "starlette"
version = "0.27.0"
description = "The little ASGI library that shines."
-optional = false
+optional = true
python-versions = ">=3.7"
files = [
    {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
@@ -3530,7 +3506,7 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyam
name = "starlette-context"
version = "0.3.6"
description = "Middleware for Starlette that allows you to store and access the context data of a request. Can be used with logging so logs automatically use request headers such as x-request-id or x-correlation-id."
-optional = false
+optional = true
python-versions = ">=3.8,<4.0"
files = [
    {file = "starlette_context-0.3.6-py3-none-any.whl", hash = "sha256:b14ce373fbb6895a2182a7104b9f63ba20c8db83444005fb9a844dd77ad9895c"},
@@ -3544,7 +3520,7 @@ starlette = "*"
name = "tabulate"
version = "0.9.0"
description = "Pretty-print tabular data"
-optional = false
+optional = true
python-versions = ">=3.7"
files = [
    {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
@@ -3556,20 +3532,20 @@ widechars = ["wcwidth"]

[[package]]
name = "tomli"
-version = "2.0.1"
+version = "2.0.2"
description = "A lil' TOML parser"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
-    {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
-    {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+    {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"},
+    {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"},
]

[[package]]
name = "tqdm"
version = "4.66.5"
description = "Fast, Extensible Progress Meter"
-optional = false
+optional = true
python-versions = ">=3.7"
files = [
    {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"},
@@ -3589,7 +3565,7 @@ telegram = ["requests"]
name = "traitlets"
version = "5.14.3"
description = "Traitlets Python configuration system"
-optional = false
+optional = true
python-versions = ">=3.8"
files = [
    {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"},
@@ -3602,13 +3578,13 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,

[[package]]
name = "types-awscrt"
-version = "0.21.5"
+version = "0.22.0"
description = "Type annotations and code completion for awscrt"
optional = false
python-versions = ">=3.8"
files = [
-    {file = "types_awscrt-0.21.5-py3-none-any.whl", hash = "sha256:117ff2b1bb657f09d01b7e0ce3fe3fa6e039be12d30b826896182725c9ce85b1"},
-    {file = "types_awscrt-0.21.5.tar.gz", hash = "sha256:9f7f47de68799cb2bcb9e486f48d77b9f58962b92fba43cb8860da70b3c57d1b"},
+    {file = "types_awscrt-0.22.0-py3-none-any.whl", hash = "sha256:b2c196bbd3226bab42d80fae13c34548de9ddc195f5a366d79c15d18e5897aa9"},
+    {file = "types_awscrt-0.22.0.tar.gz", hash = "sha256:67a660c90bad360c339f6a79310cc17094d12472042c7ca5a41450aaf5fc9a54"},
]

[[package]]
@@ -3638,13 +3614,13 @@ types-pyasn1 = "*"

[[package]]
name = "types-pytz"
-version = "2024.2.0.20240913"
+version = "2024.2.0.20241003"
description = "Typing stubs for pytz"
optional = false
python-versions = ">=3.8"
files = [
-    {file = "types-pytz-2024.2.0.20240913.tar.gz", hash = "sha256:4433b5df4a6fc587bbed41716d86a5ba5d832b4378e506f40d34bc9c81df2c24"},
-    {file = "types_pytz-2024.2.0.20240913-py3-none-any.whl", hash = "sha256:a1eebf57ebc6e127a99d2fa2ba0a88d2b173784ef9b3defcc2004ab6855a44df"},
+    {file = "types-pytz-2024.2.0.20241003.tar.gz", hash = "sha256:575dc38f385a922a212bac00a7d6d2e16e141132a3c955078f4a4fd13ed6cb44"},
+    {file = "types_pytz-2024.2.0.20241003-py3-none-any.whl", hash = "sha256:3e22df1336c0c6ad1d29163c8fda82736909eb977281cb823c57f8bae07118b7"},
]

[[package]]
@@ -3688,13 +3664,13 @@ urllib3 = ">=2"

[[package]]
name = "types-s3transfer"
-version = "0.10.2"
+version = "0.10.3"
description = "Type annotations and code completion for s3transfer"
optional = false
python-versions = ">=3.8"
files = [
-    {file = "types_s3transfer-0.10.2-py3-none-any.whl", hash = "sha256:7a3fec8cd632e2b5efb665a355ef93c2a87fdd5a45b74a949f95a9e628a86356"},
-    {file = "types_s3transfer-0.10.2.tar.gz", hash = "sha256:60167a3bfb5c536ec6cdb5818f7f9a28edca9dc3e0b5ff85ae374526fc5e576e"},
+    {file = "types_s3transfer-0.10.3-py3-none-any.whl", hash = "sha256:d34c5a82f531af95bb550927136ff5b737a1ed3087f90a59d545591dfde5b4cc"},
+    {file = "types_s3transfer-0.10.3.tar.gz", hash = "sha256:f761b2876ac4c208e6c6b75cdf5f6939009768be9950c545b11b0225e7703ee7"},
]

[[package]]
@@ -3969,7 +3945,7 @@ tests = ["build", "coverage", "mypy", "pyyaml", "ruff", "wheel"]
name = "wcwidth"
version = "0.2.13"
description = "Measures the displayed width of unicode strings in a terminal"
-optional = false
+optional = true
python-versions = "*"
files = [
    {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"},
@@ -4086,7 +4062,7 @@ files = [
name = "yoyo-migrations"
version = "8.2.0"
description = "Database migrations with SQL"
-optional = false
+optional = true
python-versions = "*"
files = [
    {file = "yoyo-migrations-8.2.0.tar.gz", hash = "sha256:820606a03e262cf1cd4f59e256c28fa446425224d5b82a3d1275fd78178523e4"},
@@ -4107,7 +4083,7 @@ pyodbc = ["pyodbc"]
name = "zipp"
version = "3.20.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
-optional = false
+optional = true
python-versions = ">=3.8"
files = [
    {file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
@@ -4123,9 +4099,9 @@ test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools",
type = ["pytest-mypy"]

[extras]
-server = ["alembic", "arq", "authlib", "boto3", "cryptography", "email-validator", "fastapi", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "slack-sdk", "uvicorn", "watchtower"]
+server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptography", "fastapi", "hgvs", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "slack-sdk", "starlette", "starlette-context", "uvicorn", "watchtower"]

[metadata]
lock-version = "2.0"
python-versions = "^3.9"
-content-hash = "2de247df51a9bf90242c2e716970dba3ab2ad1d9e27a4b225968f8873e128e55"
+content-hash = "d19ee1ff7a7ef41f6e72f85bd9038938178e539f0852d6e91531a6d80193bde8"

From ab3047675329a3a4f405ac3b4893ebf5e7520bea Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Wed, 9 Oct 2024 10:56:56 -0700
Subject: [PATCH 30/58] Fix Typo in Forward Reference Model

---
 src/mavedb/view_models/score_set.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index 463431c6..5c166e64 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -319,5 +319,5 @@ class ScoreSetPublicDump(SavedScoreSet):
 from mavedb.view_models.experiment import Experiment

-ShorterScoreSet.update_forward_refs()
+ShortScoreSet.update_forward_refs()
 ScoreSet.update_forward_refs()

From 43a24cf4db1e6a1fc24736186db0e72929a4cb2d Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:08:38 -0700
Subject: [PATCH 31/58] Add Score Range Column to Score Sets

---
 .../2b6f40ea2fb6_add_score_range_column.py | 28 +++++++++++++++++++
 src/mavedb/models/score_set.py             |  5 +++-
 2 files changed, 32 insertions(+), 1 deletion(-)
 create mode 100644 alembic/versions/2b6f40ea2fb6_add_score_range_column.py

diff --git a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py
new file mode 100644
index 00000000..63095f41
--- /dev/null
+++ b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py
@@ -0,0 +1,28 @@
+"""Add score range column
+
+Revision ID: 2b6f40ea2fb6
+Revises: 76e1e55bc5c1
+Create Date: 2024-09-09 12:25:33.180077
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = "2b6f40ea2fb6"
+down_revision = "76e1e55bc5c1"
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column("scoresets", sa.Column("score_ranges", postgresql.JSONB(astext_type=sa.Text()), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column("scoresets", "score_ranges")
+    # ### end Alembic commands ###

diff --git a/src/mavedb/models/score_set.py b/src/mavedb/models/score_set.py
index d7bea1dc..01e3ec43 100644
--- a/src/mavedb/models/score_set.py
+++ b/src/mavedb/models/score_set.py
@@ -157,6 +157,7 @@ class ScoreSet(Base):
     )

     target_genes: Mapped[List["TargetGene"]] = relationship(back_populates="score_set", cascade="all, delete-orphan")
+    score_ranges = Column(JSONB, nullable=True)

     # Unfortunately, we can't use association_proxy here, because in spite of what the documentation seems to imply, it
     # doesn't check for a pre-existing keyword with the same text.
@@ -171,7 +172,9 @@ def legacy_keywords(self) -> list[str]:
         #     return self._updated_keywords
         # else:
         legacy_keyword_objs = self.legacy_keyword_objs or []  # getattr(self, 'keyword_objs', [])
-        return [legacy_keyword_obj.text for legacy_keyword_obj in legacy_keyword_objs if legacy_keyword_obj.text is not None]
+        return [
+            legacy_keyword_obj.text for legacy_keyword_obj in legacy_keyword_objs if legacy_keyword_obj.text is not None
+        ]

     async def set_legacy_keywords(self, db, keywords: list[str]):
         self.keyword_objs = [await self._find_or_create_legacy_keyword(db, text) for text in keywords]
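[Note on the migration above: score ranges land in a schemaless JSONB column rather than typed columns. A minimal sketch of how such a column can be declared and filtered with SQLAlchemy, assuming a PostgreSQL backend; the stand-in model below is illustrative, not MaveDB's actual class.]

    from sqlalchemy import Column, Integer
    from sqlalchemy.dialects.postgresql import JSONB
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()

    class ScoreSetSketch(Base):
        """Stand-in for mavedb.models.score_set.ScoreSet (hypothetical table name)."""
        __tablename__ = "scoresets_sketch"
        id = Column(Integer, primary_key=True)
        # Mirrors op.add_column("scoresets", sa.Column("score_ranges", JSONB, nullable=True))
        score_ranges = Column(JSONB, nullable=True)

    # With JSONB the payload stays flexible; rows that define ranges at all can be
    # filtered with: session.query(ScoreSetSketch).filter(ScoreSetSketch.score_ranges.isnot(None))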
From 10350b869c1e82c802c47e1bc695fa32ea96f40d Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:09:22 -0700
Subject: [PATCH 32/58] Add Validation lib Helpers for Score Ranges

---
 .../lib/validation/constants/score_set.py |  1 +
 src/mavedb/lib/validation/utilities.py     | 25 +++++++++++++++++++
 2 files changed, 26 insertions(+)
 create mode 100644 src/mavedb/lib/validation/constants/score_set.py

diff --git a/src/mavedb/lib/validation/constants/score_set.py b/src/mavedb/lib/validation/constants/score_set.py
new file mode 100644
index 00000000..eaf9aa27
--- /dev/null
+++ b/src/mavedb/lib/validation/constants/score_set.py
@@ -0,0 +1 @@
+default_ranges = ["normal", "abnormal"]

diff --git a/src/mavedb/lib/validation/utilities.py b/src/mavedb/lib/validation/utilities.py
index 985b31e1..247a70b2 100644
--- a/src/mavedb/lib/validation/utilities.py
+++ b/src/mavedb/lib/validation/utilities.py
@@ -1,3 +1,4 @@
+import math
 from random import choice
 from typing import Optional, SupportsIndex

@@ -274,6 +275,30 @@ def convert_hgvs_nt_to_hgvs_pro(hgvs_nt: str, target_seq: str):
     return construct_hgvs_pro(wt=target_aa, mutant=variant_aa, position=codon_number, target_seq=target_seq)


+def inf_or_float(v: Optional[float], lower: bool) -> float:
+    """
+    This function takes an hgvs formatted string and returns True if the hgvs string indicates
+    there was no change from the target sequence.
+
+    Parameters
+    ----------
+    hgvs : string
+        hgvs formatted string
+
+    Returns
+    -------
+    wt : bool
+        True if hgvs string indicates wild type
+    """
+    if v is None:
+        if lower:
+            return -math.inf
+        else:
+            return math.inf
+    else:
+        return v
+
+
 def _is_wild_type(hgvs: str):
     # TODO this is no longer valid
     """
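[The helper's contract is small: None becomes negative infinity for a lower bound and positive infinity for an upper bound, so half-open score ranges compare cleanly. A usage sketch with the behavior copied from the function added above; note its docstring is mistakenly copied from _is_wild_type here and is corrected in a later commit in this series.]

    import math
    from typing import Optional

    def inf_or_float(v: Optional[float], lower: bool) -> float:
        # None maps to -inf for lower bounds and +inf for upper bounds.
        if v is None:
            return -math.inf if lower else math.inf
        return v

    # An upper-unbounded range such as (2, None) then behaves like (2, +inf):
    assert inf_or_float(None, lower=False) == math.inf
    assert inf_or_float(None, lower=True) == -math.inf
    assert inf_or_float(1.5, lower=True) == 1.5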
From 43dbf431a729a1b2e7a4c851b56ae86b1d9715fd Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:11:12 -0700
Subject: [PATCH 33/58] Accept Score Ranges from API Methods

---
 src/mavedb/view_models/score_set.py |  72 ++++++++++-
 tests/routers/test_score_set.py     |  48 +++++++
 tests/view_models/test_score_set.py | 192 ++++++++++++++++++++++++++--
 3 files changed, 301 insertions(+), 11 deletions(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index b6cc9064..d5075360 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -4,11 +4,13 @@
 from datetime import date
 from pydantic import root_validator
 from typing import Collection, Dict, Optional, Any, Sequence
+
 from humps import camelize

+from mavedb.lib.validation.constants.score_set import default_ranges
 from mavedb.lib.validation import urn_re
 from mavedb.lib.validation.exceptions import ValidationError
-from mavedb.lib.validation.utilities import is_null
+from mavedb.lib.validation.utilities import is_null, inf_or_float
 from mavedb.models.enums.processing_state import ProcessingState
 from mavedb.models.enums.mapping_state import MappingState
 from mavedb.models.target_sequence import TargetSequence
@@ -41,6 +43,11 @@ class ExternalLink(BaseModel):
     url: Optional[str]


+class ScoreRange(BaseModel):
+    description: Optional[str]
+    range: tuple[Optional[float], Optional[float]]
+
+
 class ScoreSetGetter(PublicationIdentifiersGetter):
     def get(self, key: Any, default: Any = ...) -> Any:
         if key == "meta_analyzes_score_set_urns":
@@ -70,6 +77,7 @@ class ScoreSetModify(ScoreSetBase):
     secondary_publication_identifiers: Optional[list[PublicationIdentifierCreate]]
     doi_identifiers: Optional[list[DoiIdentifierCreate]]
     target_genes: list[TargetGeneCreate]
+    score_ranges: Optional[dict[str, ScoreRange]]

     @validator("title", "short_description", "abstract_text", "method_text")
     def validate_field_is_non_empty(cls, v):
@@ -124,6 +132,67 @@ def at_least_one_target_gene_exists(cls, field_value, values):

         return field_value

+    @validator("score_ranges")
+    def ranges_are_not_backwards(cls, field_value: dict[str, ScoreRange]):
+        for k, v in field_value.items():
+            if len(v.range) != 2:
+                raise ValidationError("Only a lower and upper bound are allowed.")
+            if inf_or_float(v.range[0], True) > inf_or_float(v.range[1], False):
+                raise ValidationError(
+                    f"The lower bound of the `{k}` score range may not be larger than the upper bound."
+                )
+            elif inf_or_float(v.range[0], True) == inf_or_float(v.range[1], False):
+                raise ValidationError(f"The lower and upper bound of the `{k}` score range may not be the same.")
+
+        return field_value
+
+    @validator("score_ranges")
+    def ranges_do_not_overlap(cls, field_value: dict[str, ScoreRange]):
+        def test_overlap(
+            tp1: tuple[Optional[float], Optional[float]], tp2: tuple[Optional[float], Optional[float]]
+        ) -> bool:
+            # Always check the tuple with the lowest lower bound. If we do not check
+            # overlaps in this manner, checking the overlap of (0,1) and (1,2) will
+            # yield different results depending on the ordering of tuples.
+            if min(inf_or_float(tp1[0], True), inf_or_float(tp2[0], True)) == inf_or_float(tp1[0], True):
+                tp_with_min_value = tp1
+                tp_with_non_min_value = tp2
+            else:
+                tp_with_min_value = tp2
+                tp_with_non_min_value = tp1
+
+            if inf_or_float(tp_with_min_value[1], False) > inf_or_float(
+                tp_with_non_min_value[0], True
+            ) and inf_or_float(tp_with_min_value[0], True) <= inf_or_float(tp_with_non_min_value[1], False):
+                return True
+
+            return False
+
+        for i, (k_test, v_test) in enumerate(field_value.items()):
+            for k_check, v_check in list(field_value.items())[i + 1 :]:
+                if test_overlap(v_test.range, v_check.range):
+                    raise ValidationError(f"Score ranges may not overlap; `{k_test}` overlaps with `{k_check}`")
+
+        return field_value
+
+    @validator("score_ranges")
+    def ranges_contain_normal_and_abnormal(cls, field_value: dict[str, ScoreRange]):
+        ranges = set(field_value.keys())
+        if not set(default_ranges).issubset(ranges):
+            raise ValidationError("Both `normal` and `abnormal` ranges must be provided.")
+
+        return field_value
+
+    @validator("score_ranges")
+    def description_exists_for_all_ranges(cls, field_value: dict[str, ScoreRange]):
+        for k, v in field_value.items():
+            if k not in default_ranges and is_null(v.description):
+                raise ValidationError(
+                    f"A description must be present for each non-default score range (No description provided for range `{k}`)."
+                )
+
+        return field_value
+

 class ScoreSetCreate(ScoreSetModify):
     """View model for creating a new score set."""
@@ -239,6 +308,7 @@ class SavedScoreSet(ScoreSetBase):
     dataset_columns: Dict
     external_links: Dict[str, ExternalLink]
     contributors: list[Contributor]
+    score_ranges: Optional[dict[str, ScoreRange]]

     class Config:
         orm_mode = True

diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py
index b9cbc5b5..4a9d683d 100644
--- a/tests/routers/test_score_set.py
+++ b/tests/routers/test_score_set.py
@@ -102,6 +102,54 @@ def test_create_score_set_with_contributor(client, setup_router_db):
     assert response.status_code == 200


+def test_create_score_set_with_score_range(client, setup_router_db):
+    experiment = create_experiment(client)
+    score_set = deepcopy(TEST_MINIMAL_SEQ_SCORESET)
+    score_set["experimentUrn"] = experiment["urn"]
+    score_set.update(
+        {
+            "score_ranges": {
+                "normal": {"range": (-2, 2)},
+                "abnormal": {"range": (2, None)},
+                "custom1": {"description": "A user provided custom range", "range": (None, -2)},
+            }
+        }
+    )
+
+    response = client.post("/api/v1/score-sets/", json=score_set)
+    assert response.status_code == 200
+
+    response_data = response.json()
+    # Omitting this check for now, because it seems the schema generation behavior
+    # for nullable items within a tuple doesn't work quite right. The schema generated
+    # by our view model implies the tuple must have two numeric elements, but in reality
+    # those elements may be None.
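[Taken together, the validators added in this commit accept a payload shaped like the sketch below: the `normal` and `abnormal` keys are required, each range is an ordered two-element bound where None means unbounded, ranges may not overlap, and any non-default range must carry a description. The description text here is invented for illustration; the bounds mirror the router test above.]

    score_ranges = {
        "normal": {"range": (-2, 2)},
        "abnormal": {"range": (2, None)},  # open-ended upper bound
        "custom1": {"description": "Scores beyond the assay floor", "range": (None, -2)},
    }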
From 9c8f7ca2cc9953e32ed7c74f01a8119ebc149a53 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:16:45 -0700
Subject: [PATCH 34/58] Fix Docstring in Util inf Function

---
 src/mavedb/lib/validation/utilities.py | 15 +++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/src/mavedb/lib/validation/utilities.py b/src/mavedb/lib/validation/utilities.py
index 247a70b2..bb028016 100644
--- a/src/mavedb/lib/validation/utilities.py
+++ b/src/mavedb/lib/validation/utilities.py
@@ -277,18 +277,21 @@ def convert_hgvs_nt_to_hgvs_pro(hgvs_nt: str, target_seq: str):

 def inf_or_float(v: Optional[float], lower: bool) -> float:
     """
-    This function takes an hgvs formatted string and returns True if the hgvs string indicates
-    there was no change from the target sequence.
+    This function takes an optional float and either converts the passed nonetype
+    object to the appropriate infinity value (based on lower) or returns the float
+    directly.

     Parameters
     ----------
-    hgvs : string
-        hgvs formatted string
+    v : float or None
+        an optional floating point value
+    lower : bool
+        whether the value is a lower bound

     Returns
     -------
-    wt : bool
-        True if hgvs string indicates wild type
+    v : float
+        Infinity or -Infinity if the initially passed v was None. v otherwise.
     """
     if v is None:
         if lower:

From dd1a611f6773c077d570aa0ee7221be8c126658b Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:29:22 -0700
Subject: [PATCH 35/58] Type hint workaround for buggy jsonschema generation

---
 src/mavedb/view_models/score_set.py |  8 +++++++-
 tests/routers/test_score_set.py     |  6 +-----
 tests/view_models/test_score_set.py | 13 +++++++++++++
 3 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index d5075360..810f025f 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -45,7 +45,13 @@ class ExternalLink(BaseModel):

 class ScoreRange(BaseModel):
     description: Optional[str]
-    range: tuple[Optional[float], Optional[float]]
+    # Purposefully vague type hint because of some odd JSON Schema generation behavior.
+    # Typing this as tuple[Union[float, None], Union[float, None]] will generate an invalid
+    # jsonschema, and fail all tests that access the schema. This may be fixed in pydantic v2,
+    # but it is unclear. Even just typing it as Tuple[Any, Any] will generate an invalid schema!
+    #
+    # tuple[Union[float, None], Union[float, None]]
+    range: list[Any, Any]

diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py
index 4a9d683d..211716bc 100644
--- a/tests/routers/test_score_set.py
+++ b/tests/routers/test_score_set.py
@@ -120,11 +120,7 @@ def test_create_score_set_with_score_range(client, setup_router_db):
     assert response.status_code == 200

     response_data = response.json()
-    # Omitting this check for now, because it seems the schema generation behavior
-    # for nullable items within a tuple doesn't work quite right. The schema generated
-    # by our view model implies the tuple must have two numeric elements, but in reality
-    # those elements may be None.
-    # jsonschema.validate(instance=response_data, schema=ScoreSet.schema())
+    jsonschema.validate(instance=response_data, schema=ScoreSet.schema())

diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py
index 7adc1152..7b67715d 100644
--- a/tests/view_models/test_score_set.py
+++ b/tests/view_models/test_score_set.py
@@ -227,6 +227,19 @@ def test_cannot_create_score_set_with_an_empty_method():
     assert "methodText" in str(exc_info.value)


+def test_cannot_create_score_set_with_too_many_boundaries():
+    score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
+    score_set_test["score_ranges"] = {
+        "normal": {"range": (0, 1.1, 2.2)},
+        "abnormal": {"range": (1, 2.1, 3.1)},
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ScoreSetModify(**jsonable_encoder(score_set_test))
+
+    assert "Only a lower and upper bound are allowed." in str(exc_info.value)
+
+
 def test_cannot_create_score_set_with_overlapping_ranges():

From 64fc89d375c771eec509fbde22941c937a90da36 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 9 Sep 2024 14:30:58 -0700
Subject: [PATCH 36/58] Remove Tuple Type Hints to Appease MyPy

---
 src/mavedb/view_models/score_set.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index 810f025f..7aa11e7d 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -51,7 +51,7 @@ class ScoreRange(BaseModel):
     # but it is unclear. Even just typing it as Tuple[Any, Any] will generate an invalid schema!
     #
     # tuple[Union[float, None], Union[float, None]]
-    range: list[Any, Any]
+    range: list[Any]

@@ -154,9 +154,7 @@

     @validator("score_ranges")
     def ranges_do_not_overlap(cls, field_value: dict[str, ScoreRange]):
-        def test_overlap(
-            tp1: tuple[Optional[float], Optional[float]], tp2: tuple[Optional[float], Optional[float]]
-        ) -> bool:
+        def test_overlap(tp1, tp2) -> bool:
             # Always check the tuple with the lowest lower bound. If we do not check
             # overlaps in this manner, checking the overlap of (0,1) and (1,2) will
             # yield different results depending on the ordering of tuples.
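[With the deliberately vague list[Any] hint in place, the generated JSON Schema should be consumable again. A small sketch of the round-trip the re-enabled test performs, assuming pydantic v1's .schema() export; view_model stands for any pydantic view model such as MaveDB's ScoreSet.]

    import jsonschema

    def assert_schema_is_usable(view_model, instance: dict) -> None:
        schema = view_model.schema()  # pydantic v1 JSON Schema export
        # Raises jsonschema.exceptions.ValidationError if the instance does not conform.
        jsonschema.validate(instance=instance, schema=schema)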
From 5c93e5f88b504549faf24dc3bbf2ec4bda3bc91d Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Mon, 16 Sep 2024 15:37:43 -0700
Subject: [PATCH 37/58] New Alembic Downgrade Revision after Rebase

---
 alembic/versions/2b6f40ea2fb6_add_score_range_column.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py
index 63095f41..aaafffeb 100644
--- a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py
+++ b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py
@@ -1,7 +1,7 @@
 """Add score range column

 Revision ID: 2b6f40ea2fb6
-Revises: 76e1e55bc5c1
+Revises: 1d4933b4b6f7
 Create Date: 2024-09-09 12:25:33.180077

 """
@@ -11,7 +11,7 @@

 # revision identifiers, used by Alembic.
 revision = "2b6f40ea2fb6"
-down_revision = "76e1e55bc5c1"
+down_revision = "1d4933b4b6f7"
 branch_labels = None
 depends_on = None

From 9360aa85740426b0e6db7fc8d9ea0d8d8c1c861b Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 17 Sep 2024 10:38:52 -0700
Subject: [PATCH 38/58] Add wt_scores; Literal classifications; Explicit label;
 Ranges as list

---
 src/mavedb/view_models/score_set.py |  78 +++++++-----
 tests/routers/test_score_set.py     |  29 +++--
 tests/view_models/test_score_set.py | 177 +++++++++++++++++++++------
 3 files changed, 204 insertions(+), 80 deletions(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index 7aa11e7d..677a1bc9 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -2,8 +2,8 @@
 from __future__ import annotations

 from datetime import date
-from pydantic import root_validator
-from typing import Collection, Dict, Optional, Any, Sequence
+from pydantic import root_validator, conlist
+from typing import Collection, Dict, Literal, Optional, Any, Sequence

 from humps import camelize

 class ScoreRange(BaseModel):
+    label: str
     description: Optional[str]
+    classification: Literal["normal", "abnormal"]
     # Purposefully vague type hint because of some odd JSON Schema generation behavior.
     # Typing this as tuple[Union[float, None], Union[float, None]] will generate an invalid
     # jsonschema, and fail all tests that access the schema. This may be fixed in pydantic v2,
-    # but it is unclear. Even just typing it as Tuple[Any, Any] will generate an invalid schema!
-    #
-    # tuple[Union[float, None], Union[float, None]]
-    range: list[Any]
+    # but it's unclear. Even just typing it as Tuple[Any, Any] will generate an invalid schema!
+    range: list[Any]  # really: tuple[Union[float, None], Union[float, None]]
+
+
+class ScoreRanges(BaseModel):
+    wt_score: float
+    ranges: conlist(ScoreRange, min_items=1)


 class ScoreSetGetter(PublicationIdentifiersGetter):
@@ -83,7 +88,7 @@ class ScoreSetModify(ScoreSetBase):
     secondary_publication_identifiers: Optional[list[PublicationIdentifierCreate]]
     doi_identifiers: Optional[list[DoiIdentifierCreate]]
     target_genes: list[TargetGeneCreate]
-    score_ranges: Optional[dict[str, ScoreRange]]
+    score_ranges: Optional[ScoreRanges]

     @validator("title", "short_description", "abstract_text", "method_text")
     def validate_field_is_non_empty(cls, v):
@@ -139,21 +144,23 @@ def at_least_one_target_gene_exists(cls, field_value, values):
         return field_value

     @validator("score_ranges")
-    def ranges_are_not_backwards(cls, field_value: dict[str, ScoreRange]):
-        for k, v in field_value.items():
-            if len(v.range) != 2:
+    def ranges_are_not_backwards(cls, field_value: ScoreRanges):
+        for range_model in field_value.ranges:
+            if len(range_model.range) != 2:
                 raise ValidationError("Only a lower and upper bound are allowed.")
-            if inf_or_float(v.range[0], True) > inf_or_float(v.range[1], False):
+            if inf_or_float(range_model.range[0], True) > inf_or_float(range_model.range[1], False):
                 raise ValidationError(
-                    f"The lower bound of the `{k}` score range may not be larger than the upper bound."
+                    f"The lower bound of the `{range_model.label}` score range may not be larger than the upper bound."
+                )
+            elif inf_or_float(range_model.range[0], True) == inf_or_float(range_model.range[1], False):
+                raise ValidationError(
+                    f"The lower and upper bound of the `{range_model.label}` score range may not be the same."
                 )
-            elif inf_or_float(v.range[0], True) == inf_or_float(v.range[1], False):
-                raise ValidationError(f"The lower and upper bound of the `{k}` score range may not be the same.")

         return field_value

     @validator("score_ranges")
-    def ranges_do_not_overlap(cls, field_value: dict[str, ScoreRange]):
+    def ranges_do_not_overlap(cls, field_value: ScoreRanges):
         def test_overlap(tp1, tp2) -> bool:
             # Always check the tuple with the lowest lower bound. If we do not check
             # overlaps in this manner, checking the overlap of (0,1) and (1,2) will
             # yield different results depending on the ordering of tuples.
@@ -172,28 +179,43 @@

             return False

-        for i, (k_test, v_test) in enumerate(field_value.items()):
-            for k_check, v_check in list(field_value.items())[i + 1 :]:
-                if test_overlap(v_test.range, v_check.range):
-                    raise ValidationError(f"Score ranges may not overlap; `{k_test}` overlaps with `{k_check}`")
+        for i, range_test in enumerate(field_value.ranges):
+            for range_check in list(field_value.ranges)[i + 1 :]:
+                if test_overlap(range_test.range, range_check.range):
+                    raise ValidationError(
+                        f"Score ranges may not overlap; `{range_test.label}` overlaps with `{range_check.label}`"
+                    )

         return field_value

     @validator("score_ranges")
-    def ranges_contain_normal_and_abnormal(cls, field_value: dict[str, ScoreRange]):
-        ranges = set(field_value.keys())
+    def ranges_contain_normal_and_abnormal(cls, field_value: ScoreRanges):
+        ranges = set([range_model.classification for range_model in field_value.ranges])
         if not set(default_ranges).issubset(ranges):
             raise ValidationError("Both `normal` and `abnormal` ranges must be provided.")

         return field_value

     @validator("score_ranges")
-    def description_exists_for_all_ranges(cls, field_value: dict[str, ScoreRange]):
-        for k, v in field_value.items():
-            if k not in default_ranges and is_null(v.description):
-                raise ValidationError(
-                    f"A description must be present for each non-default score range (No description provided for range `{k}`)."
-                )
+    def wild_type_score_in_normal_range(cls, field_value: ScoreRanges):
+        normal_ranges = [
+            range_model.range for range_model in field_value.ranges if range_model.classification == "normal"
+        ]
+        for range in normal_ranges:
+            if field_value.wt_score >= inf_or_float(range[0], lower=True) and field_value.wt_score < inf_or_float(
+                range[1], lower=False
+            ):
+                return field_value
+
+        raise ValidationError(
+            f"The provided wild type score of {field_value.wt_score} is not within any of the provided normal ranges. This score should be within a normal range."
+        )
+
+    @validator("score_ranges")
+    def score_range_labels_must_be_unique(cls, field_value: ScoreRanges):
+        range_labels = set([range_model.label.strip() for range_model in field_value.ranges])
+        if len(range_labels) != len(field_value.ranges):
+            raise ValidationError("Detected repeated labels. Range labels must be unique.")

         return field_value

@@ -312,7 +334,7 @@ class SavedScoreSet(ScoreSetBase):
     dataset_columns: Dict
     external_links: Dict[str, ExternalLink]
     contributors: list[Contributor]
-    score_ranges: Optional[dict[str, ScoreRange]]
+    score_ranges: Optional[ScoreRanges]

     class Config:
         orm_mode = True

diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py
index 211716bc..5e4cbe39 100644
--- a/tests/routers/test_score_set.py
+++ b/tests/routers/test_score_set.py
@@ -109,9 +109,17 @@ def test_create_score_set_with_score_range(client, setup_router_db):
     score_set.update(
         {
             "score_ranges": {
-                "normal": {"range": (-2, 2)},
-                "abnormal": {"range": (2, None)},
-                "custom1": {"description": "A user provided custom range", "range": (None, -2)},
+                "wt_score": 0.5,
+                "ranges": [
+                    {"label": "range_1", "range": (-2, 2), "classification": "normal"},
+                    {"label": "range_2", "range": (2, None), "classification": "abnormal"},
+                    {
+                        "label": "custom_1",
+                        "range": (None, -2),
+                        "classification": "abnormal",
+                        "description": "A user provided custom range",
+                    },
+                ],
             }
         }
     )
@@ -133,10 +141,17 @@
     expected_response["scoreRanges"] = {
-        # Although the ranges are lists, the jsonschema should apply a min + max length to them
-        "normal": {"range": [-2, 2]},
-        "abnormal": {"range": [2, None]},
-        "custom1": {"description": "A user provided custom range", "range": [None, -2]},
+        "wtScore": 0.5,
+        "ranges": [
+            {"label": "range_1", "range": [-2, 2], "classification": "normal"},
+            {"label": "range_2", "range": [2, None], "classification": "abnormal"},
+            {
+                "label": "custom_1",
+                "range": [None, -2],
+                "classification": "abnormal",
+                "description": "A user provided custom range",
+            },
+        ],
     }

diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py
index 7b67715d..82491b9b 100644
--- a/tests/view_models/test_score_set.py
+++ b/tests/view_models/test_score_set.py
@@ -230,8 +230,11 @@
 def test_cannot_create_score_set_with_too_many_boundaries():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (0, 1.1, 2.2)},
-        "abnormal": {"range": (1, 2.1, 3.1)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (0, 1, 2.0)},
+            {"label": "range_2", "classification": "abnormal", "range": (2.0, 2.1, 2.3)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

     assert "Only a lower and upper bound are allowed." in str(exc_info.value)


 def test_cannot_create_score_set_with_overlapping_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (0, 1.1)},
-        "abnormal": {"range": (1, 2.1)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (0, 1.1)},
+            {"label": "range_2", "classification": "abnormal", "range": (1, 2.1)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "Score ranges may not overlap; `normal` overlaps with `abnormal`" in str(
-        exc_info.value
-    )
+    assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value)


 def test_can_create_score_set_with_adjacent_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (0, 1)},
-        "abnormal": {"range": (1, 2.1)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (0, 1)},
+            {"label": "range_2", "classification": "abnormal", "range": (1, 2.1)},
+        ],
     }

     ScoreSetModify(**jsonable_encoder(score_set_test))


 def test_can_create_score_set_with_flipped_adjacent_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1, 2.1)},
-        "abnormal": {"range": (0, 1)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_2", "classification": "abnormal", "range": (1, 2.1)},
+            {"label": "range_1", "classification": "normal", "range": (0, 1)},
+        ],
     }

     ScoreSetModify(**jsonable_encoder(score_set_test))


 def test_can_create_score_set_with_adjacent_negative_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (-1, 0)},
-        "abnormal": {"range": (-3, -1)},
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_2", "classification": "abnormal", "range": (-3, -1)},
+        ],
     }

     ScoreSetModify(**jsonable_encoder(score_set_test))


 def test_can_create_score_set_with_flipped_adjacent_negative_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (-3, -1)},
-        "abnormal": {"range": (-1, 0)},
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": "range_2", "classification": "abnormal", "range": (-3, -1)},
+            {"label": "range_1", "classification": "normal", "range": (-1, 0)},
+        ],
     }

     ScoreSetModify(**jsonable_encoder(score_set_test))


 def test_cannot_create_score_set_with_overlapping_upper_unbounded_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (0, None)},
-        "abnormal": {"range": (1, None)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (0, None)},
+            {"label": "range_2", "classification": "abnormal", "range": (1, None)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "Score ranges may not overlap; `normal` overlaps with `abnormal`" in str(
-        exc_info.value
-    )
+    assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value)


 def test_cannot_create_score_set_with_overlapping_lower_unbounded_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (None, 0)},
-        "abnormal": {"range": (None, 1)},
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (None, 0)},
+            {"label": "range_2", "classification": "abnormal", "range": (None, -1)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "Score ranges may not overlap; `normal` overlaps with `abnormal`" in str(
-        exc_info.value
-    )
+    assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value)


 def test_cannot_create_score_set_with_backwards_bounds():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1.2, 1.1)},
-        "abnormal": {"range": (2.2, 2.1)},
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (1, 0)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert (
-        "The lower bound of the `normal` score range may not be larger than the upper bound."
-        in str(exc_info.value)
-    )
+    assert "The lower bound of the `range_1` score range may not be larger than the upper bound." in str(exc_info.value)


 def test_cannot_create_score_set_with_equal_bounds():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1.2, 1.2)},
-        "abnormal": {"range": (2.2, 2.2)},
+        "wt_score": 1,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (-1, -1)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert (
-        "The lower and upper bound of the `normal` score range may not be the same."
-        in str(exc_info.value)
-    )
+    assert "The lower and upper bound of the `range_1` score range may not be the same." in str(exc_info.value)


+def test_cannot_create_score_set_with_duplicate_range_labels():
+    score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
+    score_set_test["score_ranges"] = {
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_1", "classification": "abnormal", "range": (-3, -1)},
+        ],
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ScoreSetModify(**jsonable_encoder(score_set_test))
+
+    assert "Detected repeated labels. Range labels must be unique." in str(exc_info.value)
+
+
+def test_cannot_create_score_set_with_duplicate_range_labels_whitespace():
+    score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
+    score_set_test["score_ranges"] = {
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": " range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_1 ", "classification": "abnormal", "range": (-3, -1)},
+        ],
+    }
+
+    with pytest.raises(ValueError) as exc_info:
+        ScoreSetModify(**jsonable_encoder(score_set_test))
+
+    assert "Detected repeated labels. Range labels must be unique." in str(exc_info.value)
+
+
+def test_cannot_create_score_set_with_wild_type_outside_ranges():
+    wt_score = 0.5
+    score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
+    score_set_test["score_ranges"] = {
+        "wt_score": wt_score,
+        "ranges": [
+            {"label": " range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_1 ", "classification": "abnormal", "range": (-3, -1)},
+        ],
+    }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

     assert (
-        "The lower bound of the `normal` score range may not be larger than the upper bound."
+        f"The provided wild type score of {wt_score} is not within any of the provided normal ranges. This score should be within a normal range."
         in str(exc_info.value)
     )


-def test_cannot_create_score_set_with_equal_bounds():
+def test_cannot_create_score_set_with_wild_type_outside_normal_range():
+    wt_score = -1.5
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1.2, 1.2)},
-        "abnormal": {"range": (2.2, 2.2)},
+        "wt_score": wt_score,
+        "ranges": [
+            {"label": " range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_1 ", "classification": "abnormal", "range": (-3, -1)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

     assert (
-        "The lower and upper bound of the `normal` score range may not be the same."
+        f"The provided wild type score of {wt_score} is not within any of the provided normal ranges. This score should be within a normal range."
         in str(exc_info.value)
     )


-@pytest.mark.parametrize("missing_name", default_ranges)
-def test_cannot_create_score_set_without_default_ranges(missing_name):
+@pytest.mark.parametrize("present_name", default_ranges)
+def test_cannot_create_score_set_without_default_range(present_name):
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1.2, 1.3)},
-        "abnormal": {"range": (2.2, 2.3)},
+        "wt_score": -1.5,
+        "ranges": [
+            {"label": "range_2", "classification": f"{present_name}", "range": (-3, -1)},
+        ],
     }
-    score_set_test["score_ranges"].pop(missing_name)

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

     assert "Both `normal` and `abnormal` ranges must be provided." in str(
         exc_info.value
     )


-def test_cannot_create_score_set_with_empty_description_for_non_default_score():
+def test_cannot_create_score_set_without_default_ranges():
     score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
     score_set_test["score_ranges"] = {
-        "normal": {"range": (1.2, 1.3)},
-        "abnormal": {"range": (2.2, 2.3)},
-        "custom1": {"description": "", "range": (3.2, 3.3)},
+        "wt_score": -0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "other", "range": (-1, 0)},
+        ],
     }

     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert (
-        "A description must be present for each non-default score range (No description provided for range `custom1`)."
-        in str(exc_info.value)
-    )
+    assert "unexpected value; permitted: 'normal', 'abnormal'" in str(exc_info.value)
From 0f6425c3b67edc53d3814c43694271bb6b6bf014 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Tue, 17 Sep 2024 11:04:55 -0700
Subject: [PATCH 39/58] Make MyPy Ignore Conlist Type Hint

---
 src/mavedb/view_models/score_set.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index 677a1bc9..fe60ade4 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -56,7 +56,7 @@ class ScoreRanges(BaseModel):
     wt_score: float
-    ranges: conlist(ScoreRange, min_items=1)
+    ranges: conlist(ScoreRange, min_items=1)  # type: ignore

From 368dc247702e59484f6c1d04f21a0c250e64ba97 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Wed, 18 Sep 2024 09:40:56 -0700
Subject: [PATCH 40/58] Improvements to Validation Error Location Output

---
 src/mavedb/lib/validation/utilities.py |  6 +-
 src/mavedb/routers/score_sets.py       |  5 ++
 src/mavedb/view_models/score_set.py    | 83 ++++++++++++++++----------
 tests/view_models/test_score_set.py    | 32 +++++++---
 4 files changed, 84 insertions(+), 42 deletions(-)

diff --git a/src/mavedb/lib/validation/utilities.py b/src/mavedb/lib/validation/utilities.py
index bb028016..b7a97bbd 100644
--- a/src/mavedb/lib/validation/utilities.py
+++ b/src/mavedb/lib/validation/utilities.py
@@ -1,6 +1,6 @@
 import math
 from random import choice
-from typing import Optional, SupportsIndex
+from typing import Optional, SupportsIndex, Union

 from mavedb.lib.validation.constants.conversion import codon_dict_DNA
 from mavedb.lib.validation.constants.conversion import aa_dict_key_1

@@ -275,7 +275,7 @@ def convert_hgvs_nt_to_hgvs_pro(hgvs_nt: str, target_seq: str):
     return construct_hgvs_pro(wt=target_aa, mutant=variant_aa, position=codon_number, target_seq=target_seq)

-def inf_or_float(v: Optional[float], lower: bool) -> float:
+def inf_or_float(v: Optional[Union[float, int, str]], lower: bool) -> float:
     """
     This function takes an optional float and either converts the passed nonetype
     object to the appropriate infinity value (based on lower) or returns the float
     directly.
@@ -299,7 +299,7 @@ def inf_or_float(v: Optional[float], lower: bool) -> float:
         else:
             return math.inf
     else:
-        return v
+        return float(v)


 def _is_wild_type(hgvs: str):

diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py
index b5fb0dee..01bac3e2 100644
--- a/src/mavedb/routers/score_sets.py
+++ b/src/mavedb/routers/score_sets.py
@@ -688,6 +688,7 @@ async def update_score_set(
     for var, value in vars(item_update).items():
         if var not in [
             "contributors",
+            "score_ranges",
             "doi_identifiers",
             "experiment_urn",
             "license_id",
@@ -730,6 +731,9 @@ async def update_score_set(

     item.publication_identifiers = publication_identifiers

+    if item_update.score_ranges:
+        item.score_ranges = item_update.score_ranges.dict()
+
     # Delete the old target gene, WT sequence, and reference map. These will be deleted when we set the score set's
     # target_gene to None, because we have set cascade='all,delete-orphan' on ScoreSet.target_gene. (Since the
     # relationship is defined with the target gene as owner, this is actually set up in the backref attribute of
@@ -882,6 +886,7 @@ async def update_score_set(

     for var, value in vars(item_update).items():
         if var not in [
+            "score_ranges",
             "contributors",
             "doi_identifiers",
             "experiment_urn",

diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index fe60ade4..7cfbcd11 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -46,17 +46,42 @@ class ExternalLink(BaseModel):

 class ScoreRange(BaseModel):
     label: str
     description: Optional[str]
-    classification: Literal["normal", "abnormal"]
+    classification: str
     # Purposefully vague type hint because of some odd JSON Schema generation behavior.
     # Typing this as tuple[Union[float, None], Union[float, None]] will generate an invalid
     # jsonschema, and fail all tests that access the schema. This may be fixed in pydantic v2,
     # but it's unclear. Even just typing it as Tuple[Any, Any] will generate an invalid schema!
     range: list[Any]  # really: tuple[Union[float, None], Union[float, None]]

+    @validator("classification")
+    def range_classification_value_is_accepted(cls, field_value: str):
+        classification = field_value.strip().lower()
+        if classification not in default_ranges:
+            raise ValidationError(
+                f"Unexpected classification value(s): {classification}. Permitted values: {default_ranges}"
+            )
+
+        return classification
+
+    @validator("range")
+    def ranges_are_not_backwards(cls, field_value: tuple[Any]):
+        if len(field_value) != 2:
+            raise ValidationError("Only a lower and upper bound are allowed.")
+
+        field_value[0] = inf_or_float(field_value[0], True) if field_value[0] else None
+        field_value[1] = inf_or_float(field_value[1], False) if field_value[1] else None
+
+        if inf_or_float(field_value[0], True) > inf_or_float(field_value[1], False):
+            raise ValidationError("The lower bound of the score range may not be larger than the upper bound.")
+        elif inf_or_float(field_value[0], True) == inf_or_float(field_value[1], False):
+            raise ValidationError("The lower and upper bound of the score range may not be the same.")
+
+        return field_value
+

 class ScoreRanges(BaseModel):
     wt_score: float
-    ranges: conlist(ScoreRange, min_items=1)  # type: ignore
+    ranges: list[ScoreRange]  # type: ignore


 class ScoreSetGetter(PublicationIdentifiersGetter):
@@ -144,19 +169,31 @@ def at_least_one_target_gene_exists(cls, field_value, values):
         return field_value

     @validator("score_ranges")
-    def ranges_are_not_backwards(cls, field_value: ScoreRanges):
-        for range_model in field_value.ranges:
-            if len(range_model.range) != 2:
-                raise ValidationError("Only a lower and upper bound are allowed.")
-            if inf_or_float(range_model.range[0], True) > inf_or_float(range_model.range[1], False):
-                raise ValidationError(
-                    f"The lower bound of the `{range_model.label}` score range may not be larger than the upper bound."
-                )
-            elif inf_or_float(range_model.range[0], True) == inf_or_float(range_model.range[1], False):
+    def score_range_labels_must_be_unique(cls, field_value: ScoreRanges):
+        existing_labels = []
+        for i, range_model in enumerate(field_value.ranges):
+            range_model.label = range_model.label.strip()
+
+            if range_model.label in existing_labels:
                 raise ValidationError(
-                    f"The lower and upper bound of the `{range_model.label}` score range may not be the same."
+                    f"Detected repeated label: `{range_model.label}`. Range labels must be unique.",
+                    custom_loc=["body", "scoreRanges", "ranges", i, "label"],
                 )

+            existing_labels.append(range_model.label)
+
+        return field_value
+
+    @validator("score_ranges")
+    def ranges_contain_normal_and_abnormal(cls, field_value: ScoreRanges):
+        ranges = set([range_model.classification for range_model in field_value.ranges])
+        if not set(default_ranges).issubset(ranges):
+            raise ValidationError(
+                "Both `normal` and `abnormal` ranges must be provided.",
+                # Raise this error inside the first classification provided by the model.
+                custom_loc=["body", "scoreRanges", "ranges", 0, "classification"],
+            )
+
         return field_value

     @validator("score_ranges")
@@ -183,19 +220,12 @@

         for i, range_test in enumerate(field_value.ranges):
             for range_check in list(field_value.ranges)[i + 1 :]:
                 if test_overlap(range_test.range, range_check.range):
                     raise ValidationError(
-                        f"Score ranges may not overlap; `{range_test.label}` overlaps with `{range_check.label}`"
+                        f"Score ranges may not overlap; `{range_test.label}` overlaps with `{range_check.label}`",
+                        custom_loc=["body", "scoreRanges", "ranges", i, "range"],
                     )

         return field_value

-    @validator("score_ranges")
     def wild_type_score_in_normal_range(cls, field_value: ScoreRanges):
         normal_ranges = [
             range_model.range for range_model in field_value.ranges if range_model.classification == "normal"
         ]
         for range in normal_ranges:
             if field_value.wt_score >= inf_or_float(range[0], lower=True) and field_value.wt_score < inf_or_float(
                 range[1], lower=False
             ):
                 return field_value

         raise ValidationError(
-            f"The provided wild type score of {field_value.wt_score} is not within any of the provided normal ranges. This score should be within a normal range."
+            f"The provided wild type score of {field_value.wt_score} is not within any of the provided normal ranges. This score should be within a normal range.",
+            custom_loc=["body", "scoreRanges", "wtScore"],
         )

-    @validator("score_ranges")
-    def score_range_labels_must_be_unique(cls, field_value: ScoreRanges):
-        range_labels = set([range_model.label.strip() for range_model in field_value.ranges])
-        if len(range_labels) != len(field_value.ranges):
-            raise ValidationError("Detected repeated labels. Range labels must be unique.")
-
-        return field_value
-

 class ScoreSetCreate(ScoreSetModify):
     """View model for creating a new score set."""

diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py
index 82491b9b..c7278e7f 100644
--- a/tests/view_models/test_score_set.py
+++ b/tests/view_models/test_score_set.py
@@ -259,6 +259,20 @@
     assert "Score ranges may not overlap; `range_1` overlaps with `range_2`" in str(exc_info.value)


+def test_can_create_score_set_with_mixed_range_types():
+    score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy()
+    score_set_test["score_ranges"] = {
+        "wt_score": 0.5,
+        "ranges": [
+            {"label": "range_1", "classification": "normal", "range": (0, 1)},
+            {"label": "range_2", "classification": "abnormal", "range": ("1.1", 2.1)},
+            {"label": "range_2", "classification": "abnormal", "range": (2.2, "3.2")},
+        ],
+    }
+
+    ScoreSetModify(**jsonable_encoder(score_set_test))
+
+
 def test_can_create_score_set_with_adjacent_ranges():

@@ -355,7 +369,7 @@
     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "The lower bound of the `range_1` score range may not be larger than the upper bound." in str(exc_info.value)
+    assert "The lower bound of the score range may not be larger than the upper bound." in str(exc_info.value)


 def test_cannot_create_score_set_with_equal_bounds():
@@ -370,7 +384,7 @@
     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "The lower and upper bound of the `range_1` score range may not be the same." in str(exc_info.value)
+    assert "The lower and upper bound of the score range may not be the same." in str(exc_info.value)


 def test_cannot_create_score_set_with_duplicate_range_labels():
@@ -386,7 +400,7 @@
     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "Detected repeated labels. Range labels must be unique." in str(exc_info.value)
+    assert "Detected repeated label: `range_1`. Range labels must be unique." in str(exc_info.value)


 def test_cannot_create_score_set_with_duplicate_range_labels_whitespace():
@@ -402,7 +416,7 @@
     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "Detected repeated labels. Range labels must be unique." in str(exc_info.value)
+    assert "Detected repeated label: `range_1`. Range labels must be unique." in str(exc_info.value)


 def test_cannot_create_score_set_with_wild_type_outside_ranges():
@@ -411,8 +425,8 @@
     score_set_test["score_ranges"] = {
         "wt_score": wt_score,
         "ranges": [
-            {"label": " range_1", "classification": "normal", "range": (-1, 0)},
-            {"label": "range_1 ", "classification": "abnormal", "range": (-3, -1)},
+            {"label": "range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_2", "classification": "abnormal", "range": (-3, -1)},
         ],
     }

@@ -431,8 +445,8 @@
     score_set_test["score_ranges"] = {
         "wt_score": wt_score,
         "ranges": [
-            {"label": " range_1", "classification": "normal", "range": (-1, 0)},
-            {"label": "range_1 ", "classification": "abnormal", "range": (-3, -1)},
+            {"label": "range_1", "classification": "normal", "range": (-1, 0)},
+            {"label": "range_2", "classification": "abnormal", "range": (-3, -1)},
         ],
     }

@@ -475,4 +489,4 @@
     with pytest.raises(ValueError) as exc_info:
         ScoreSetModify(**jsonable_encoder(score_set_test))

-    assert "unexpected value; permitted: 'normal', 'abnormal'" in str(exc_info.value)
+    assert "Unexpected classification value(s): other. Permitted values: ['normal', 'abnormal']" in str(exc_info.value)
Permitted values: ['normal', 'abnormal']" in str(exc_info.value) From ba028de3726fbbb15369935d4444e7c1374b9685 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Wed, 18 Sep 2024 15:02:54 -0700 Subject: [PATCH 41/58] Check Nonetype in Ranges Explicitly --- src/mavedb/view_models/score_set.py | 5 +++-- tests/view_models/test_score_set.py | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py index 7cfbcd11..e08278f3 100644 --- a/src/mavedb/view_models/score_set.py +++ b/src/mavedb/view_models/score_set.py @@ -68,8 +68,8 @@ def ranges_are_not_backwards(cls, field_value: tuple[Any]): if len(field_value) != 2: raise ValidationError("Only a lower and upper bound are allowed.") - field_value[0] = inf_or_float(field_value[0], True) if field_value[0] else None - field_value[1] = inf_or_float(field_value[1], False) if field_value[1] else None + field_value[0] = inf_or_float(field_value[0], True) if field_value[0] is not None else None + field_value[1] = inf_or_float(field_value[1], False) if field_value[1] is not None else None if inf_or_float(field_value[0], True) > inf_or_float(field_value[1], False): raise ValidationError("The lower bound of the score range may not be larger than the upper bound.") @@ -232,6 +232,7 @@ def wild_type_score_in_normal_range(cls, field_value: ScoreRanges): range_model.range for range_model in field_value.ranges if range_model.classification == "normal" ] for range in normal_ranges: + print(range) if field_value.wt_score >= inf_or_float(range[0], lower=True) and field_value.wt_score < inf_or_float( range[1], lower=False ): diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py index c7278e7f..368d199f 100644 --- a/tests/view_models/test_score_set.py +++ b/tests/view_models/test_score_set.py @@ -266,7 +266,7 @@ def test_can_create_score_set_with_mixed_range_types(): "ranges": [ {"label": "range_1", "classification": "normal", "range": (0, 1)}, {"label": "range_2", "classification": "abnormal", "range": ("1.1", 2.1)}, - {"label": "range_2", "classification": "abnormal", "range": (2.2, "3.2")}, + {"label": "range_3", "classification": "abnormal", "range": (2.2, "3.2")}, ], } @@ -363,6 +363,7 @@ def test_cannot_create_score_set_with_backwards_bounds(): "wt_score": 0.5, "ranges": [ {"label": "range_1", "classification": "normal", "range": (1, 0)}, + {"label": "range_2", "classification": "abnormal", "range": (2, 1)}, ], } From c52ad52d4373e871de2783ba800e1a6c2698101a Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Wed, 9 Oct 2024 11:18:46 -0700 Subject: [PATCH 42/58] Fix Alembic Revision Flow with Rebase --- .../versions/1cee01c42909_make_index_on_contributors_unique.py | 2 +- alembic/versions/2b6f40ea2fb6_add_score_range_column.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py index 1f5462c1..298182b1 100644 --- a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py +++ b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py @@ -11,7 +11,7 @@ # revision identifiers, used by Alembic. 
revision = "1cee01c42909" -down_revision = "76e1e55bc5c1" +down_revision = "1d4933b4b6f7" branch_labels = None depends_on = None diff --git a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py index aaafffeb..b2de846e 100644 --- a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py +++ b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py @@ -11,7 +11,7 @@ # revision identifiers, used by Alembic. revision = "2b6f40ea2fb6" -down_revision = "1d4933b4b6f7" +down_revision = "1cee01c42909" branch_labels = None depends_on = None From f8b3becfd7632c5267055502bfd26aeef1d2b9b9 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Wed, 9 Oct 2024 15:11:10 -0700 Subject: [PATCH 43/58] Add Pytest Coverage to Git Actions --- .github/workflows/run-tests-on-push.yml | 6 +- poetry.lock | 115 +++++++++++++++++++++--- pyproject.toml | 1 + 3 files changed, 109 insertions(+), 13 deletions(-) diff --git a/.github/workflows/run-tests-on-push.yml b/.github/workflows/run-tests-on-push.yml index dbc017b0..05f34f94 100644 --- a/.github/workflows/run-tests-on-push.yml +++ b/.github/workflows/run-tests-on-push.yml @@ -18,7 +18,7 @@ jobs: - run: pip install --upgrade pip - run: pip install poetry - run: poetry install --with dev --extras server - - run: poetry run pytest tests/ --show-capture=stdout + - run: poetry run pytest tests/ --show-capture=stdout --cov=src run-tests-3_10: runs-on: ubuntu-latest @@ -32,7 +32,7 @@ jobs: - run: pip install --upgrade pip - run: pip install poetry - run: poetry install --with dev --extras server - - run: poetry run pytest tests/ --show-capture=stdout + - run: poetry run pytest tests/ --show-capture=stdout --cov=src run-tests-3_11: runs-on: ubuntu-latest @@ -46,7 +46,7 @@ jobs: - run: pip install --upgrade pip - run: pip install poetry - run: poetry install --with dev --extras server - - run: poetry run pytest tests/ --show-capture=stdout + - run: poetry run pytest tests/ --show-capture=stdout --cov=src run-mypy-3_10: runs-on: ubuntu-latest diff --git a/poetry.lock b/poetry.lock index c8a5fab0..f8832cbf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -714,13 +714,13 @@ crt = ["awscrt (==0.21.2)"] [[package]] name = "botocore-stubs" -version = "1.35.36" +version = "1.35.37" description = "Type annotations and code completion for botocore" optional = false python-versions = ">=3.8" files = [ - {file = "botocore_stubs-1.35.36-py3-none-any.whl", hash = "sha256:bb02ed31bcaf6a239494d747a474e47e84fd2ff5ca226b9e6e77f604ba67e8b5"}, - {file = "botocore_stubs-1.35.36.tar.gz", hash = "sha256:0ac480692328879fd260b51c92e326428d26d60be6fb06bb606fcc2add9b162d"}, + {file = "botocore_stubs-1.35.37-py3-none-any.whl", hash = "sha256:0fd4ce53636196fcb72b8dad1c67cb774f2f1941891a9c5293f91401ff6d8589"}, + {file = "botocore_stubs-1.35.37.tar.gz", hash = "sha256:834f1d55c8e8a815bbb446fe9a7d3da09c4402312ff1a8fcf13fb6b4b894ab92"}, ] [package.dependencies] @@ -1035,6 +1035,83 @@ files = [ doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] test = ["pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)", "types-backports"] +[[package]] +name = "coverage" +version = "7.6.2" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +files = [ + {file = "coverage-7.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:c9df1950fb92d49970cce38100d7e7293c84ed3606eaa16ea0b6bc27175bb667"}, + {file = "coverage-7.6.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:24500f4b0e03aab60ce575c85365beab64b44d4db837021e08339f61d1fbfe52"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a663b180b6669c400b4630a24cc776f23a992d38ce7ae72ede2a397ce6b0f170"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfde025e2793a22efe8c21f807d276bd1d6a4bcc5ba6f19dbdfc4e7a12160909"}, + {file = "coverage-7.6.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:087932079c065d7b8ebadd3a0160656c55954144af6439886c8bcf78bbbcde7f"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:9c6b0c1cafd96213a0327cf680acb39f70e452caf8e9a25aeb05316db9c07f89"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e85830eed5b5263ffa0c62428e43cb844296f3b4461f09e4bdb0d44ec190bc2"}, + {file = "coverage-7.6.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62ab4231c01e156ece1b3a187c87173f31cbeee83a5e1f6dff17f288dca93345"}, + {file = "coverage-7.6.2-cp310-cp310-win32.whl", hash = "sha256:7b80fbb0da3aebde102a37ef0138aeedff45997e22f8962e5f16ae1742852676"}, + {file = "coverage-7.6.2-cp310-cp310-win_amd64.whl", hash = "sha256:d20c3d1f31f14d6962a4e2f549c21d31e670b90f777ef4171be540fb7fb70f02"}, + {file = "coverage-7.6.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb21bac7783c1bf6f4bbe68b1e0ff0d20e7e7732cfb7995bc8d96e23aa90fc7b"}, + {file = "coverage-7.6.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a7b2e437fbd8fae5bc7716b9c7ff97aecc95f0b4d56e4ca08b3c8d8adcaadb84"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f77f2bf5797983652d1d55f1a7272a29afcc89e3ae51caa99b2db4e89d658"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f361296ca7054f0936b02525646b2731b32c8074ba6defab524b79b2b7eeac72"}, + {file = "coverage-7.6.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7926d8d034e06b479797c199747dd774d5e86179f2ce44294423327a88d66ca7"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:0bbae11c138585c89fb4e991faefb174a80112e1a7557d507aaa07675c62e66b"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:fcad7d5d2bbfeae1026b395036a8aa5abf67e8038ae7e6a25c7d0f88b10a8e6a"}, + {file = "coverage-7.6.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f01e53575f27097d75d42de33b1b289c74b16891ce576d767ad8c48d17aeb5e0"}, + {file = "coverage-7.6.2-cp311-cp311-win32.whl", hash = "sha256:7781f4f70c9b0b39e1b129b10c7d43a4e0c91f90c60435e6da8288efc2b73438"}, + {file = "coverage-7.6.2-cp311-cp311-win_amd64.whl", hash = "sha256:9bcd51eeca35a80e76dc5794a9dd7cb04b97f0e8af620d54711793bfc1fbba4b"}, + {file = "coverage-7.6.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebc94fadbd4a3f4215993326a6a00e47d79889391f5659bf310f55fe5d9f581c"}, + {file = "coverage-7.6.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9681516288e3dcf0aa7c26231178cc0be6cac9705cac06709f2353c5b406cfea"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8d9c5d13927d77af4fbe453953810db766f75401e764727e73a6ee4f82527b3e"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b92f9ca04b3e719d69b02dc4a69debb795af84cb7afd09c5eb5d54b4a1ae2191"}, + {file = "coverage-7.6.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ff2ef83d6d0b527b5c9dad73819b24a2f76fdddcfd6c4e7a4d7e73ecb0656b4"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:47ccb6e99a3031ffbbd6e7cc041e70770b4fe405370c66a54dbf26a500ded80b"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a867d26f06bcd047ef716175b2696b315cb7571ccb951006d61ca80bbc356e9e"}, + {file = "coverage-7.6.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cdfcf2e914e2ba653101157458afd0ad92a16731eeba9a611b5cbb3e7124e74b"}, + {file = "coverage-7.6.2-cp312-cp312-win32.whl", hash = "sha256:f9035695dadfb397bee9eeaf1dc7fbeda483bf7664a7397a629846800ce6e276"}, + {file = "coverage-7.6.2-cp312-cp312-win_amd64.whl", hash = "sha256:5ed69befa9a9fc796fe015a7040c9398722d6b97df73a6b608e9e275fa0932b0"}, + {file = "coverage-7.6.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4eea60c79d36a8f39475b1af887663bc3ae4f31289cd216f514ce18d5938df40"}, + {file = "coverage-7.6.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:aa68a6cdbe1bc6793a9dbfc38302c11599bbe1837392ae9b1d238b9ef3dafcf1"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ec528ae69f0a139690fad6deac8a7d33629fa61ccce693fdd07ddf7e9931fba"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed5ac02126f74d190fa2cc14a9eb2a5d9837d5863920fa472b02eb1595cdc925"}, + {file = "coverage-7.6.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21c0ea0d4db8a36b275cb6fb2437a3715697a4ba3cb7b918d3525cc75f726304"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:35a51598f29b2a19e26d0908bd196f771a9b1c5d9a07bf20be0adf28f1ad4f77"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c9192925acc33e146864b8cf037e2ed32a91fdf7644ae875f5d46cd2ef086a5f"}, + {file = "coverage-7.6.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bf4eeecc9e10f5403ec06138978235af79c9a79af494eb6b1d60a50b49ed2869"}, + {file = "coverage-7.6.2-cp313-cp313-win32.whl", hash = "sha256:e4ee15b267d2dad3e8759ca441ad450c334f3733304c55210c2a44516e8d5530"}, + {file = "coverage-7.6.2-cp313-cp313-win_amd64.whl", hash = "sha256:c71965d1ced48bf97aab79fad56df82c566b4c498ffc09c2094605727c4b7e36"}, + {file = "coverage-7.6.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:7571e8bbecc6ac066256f9de40365ff833553e2e0c0c004f4482facb131820ef"}, + {file = "coverage-7.6.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:078a87519057dacb5d77e333f740708ec2a8f768655f1db07f8dfd28d7a005f0"}, + {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e5e92e3e84a8718d2de36cd8387459cba9a4508337b8c5f450ce42b87a9e760"}, + {file = "coverage-7.6.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ebabdf1c76593a09ee18c1a06cd3022919861365219ea3aca0247ededf6facd6"}, + {file = 
"coverage-7.6.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12179eb0575b8900912711688e45474f04ab3934aaa7b624dea7b3c511ecc90f"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:39d3b964abfe1519b9d313ab28abf1d02faea26cd14b27f5283849bf59479ff5"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:84c4315577f7cd511d6250ffd0f695c825efe729f4205c0340f7004eda51191f"}, + {file = "coverage-7.6.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:ff797320dcbff57caa6b2301c3913784a010e13b1f6cf4ab3f563f3c5e7919db"}, + {file = "coverage-7.6.2-cp313-cp313t-win32.whl", hash = "sha256:2b636a301e53964550e2f3094484fa5a96e699db318d65398cfba438c5c92171"}, + {file = "coverage-7.6.2-cp313-cp313t-win_amd64.whl", hash = "sha256:d03a060ac1a08e10589c27d509bbdb35b65f2d7f3f8d81cf2fa199877c7bc58a"}, + {file = "coverage-7.6.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c37faddc8acd826cfc5e2392531aba734b229741d3daec7f4c777a8f0d4993e5"}, + {file = "coverage-7.6.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ab31fdd643f162c467cfe6a86e9cb5f1965b632e5e65c072d90854ff486d02cf"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97df87e1a20deb75ac7d920c812e9326096aa00a9a4b6d07679b4f1f14b06c90"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:343056c5e0737487a5291f5691f4dfeb25b3e3c8699b4d36b92bb0e586219d14"}, + {file = "coverage-7.6.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad4ef1c56b47b6b9024b939d503ab487231df1f722065a48f4fc61832130b90e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:7fca4a92c8a7a73dee6946471bce6d1443d94155694b893b79e19ca2a540d86e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:69f251804e052fc46d29d0e7348cdc5fcbfc4861dc4a1ebedef7e78d241ad39e"}, + {file = "coverage-7.6.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:e8ea055b3ea046c0f66217af65bc193bbbeca1c8661dc5fd42698db5795d2627"}, + {file = "coverage-7.6.2-cp39-cp39-win32.whl", hash = "sha256:6c2ba1e0c24d8fae8f2cf0aeb2fc0a2a7f69b6d20bd8d3749fd6b36ecef5edf0"}, + {file = "coverage-7.6.2-cp39-cp39-win_amd64.whl", hash = "sha256:2186369a654a15628e9c1c9921409a6b3eda833e4b91f3ca2a7d9f77abb4987c"}, + {file = "coverage-7.6.2-pp39.pp310-none-any.whl", hash = "sha256:667952739daafe9616db19fbedbdb87917eee253ac4f31d70c7587f7ab531b4e"}, + {file = "coverage-7.6.2.tar.gz", hash = "sha256:a5f81e68aa62bc0cfca04f7b19eaa8f9c826b53fc82ab9e2121976dc74f131f3"}, +] + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + [[package]] name = "cryptography" version = "43.0.1" @@ -1097,13 +1174,13 @@ files = [ [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = 
"distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -2810,6 +2887,24 @@ pytest = ">=7.0.0,<9" docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"] testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] +[[package]] +name = "pytest-cov" +version = "5.0.0" +description = "Pytest plugin for measuring coverage." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"}, + {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"}, +] + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"] + [[package]] name = "pytest-postgresql" version = "5.0.0" @@ -3741,13 +3836,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "uvicorn" -version = "0.31.0" +version = "0.31.1" description = "The lightning-fast ASGI server." optional = true python-versions = ">=3.8" files = [ - {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, - {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, + {file = "uvicorn-0.31.1-py3-none-any.whl", hash = "sha256:adc42d9cac80cf3e51af97c1851648066841e7cfb6993a4ca8de29ac1548ed41"}, + {file = "uvicorn-0.31.1.tar.gz", hash = "sha256:f5167919867b161b7bcaf32646c6a94cdbd4c3aa2eb5c17d36bb9aa5cfd8c493"}, ] [package.dependencies] @@ -4104,4 +4199,4 @@ server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptogra [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "d19ee1ff7a7ef41f6e72f85bd9038938178e539f0852d6e91531a6d80193bde8" +content-hash = "10ebc4bcf807aea1ec077b25aac0ed9fce82853b30f1b8b9879f22d14ff8c7a8" diff --git a/pyproject.toml b/pyproject.toml index 30597272..624d9db5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,6 +72,7 @@ pre-commit = "*" jsonschema = "*" fakeredis = "~2.21.1" pytest = "~7.2.0" +pytest-cov = "~5.0.0" pytest-postgresql = "~5.0.0" pytest-asyncio = "~0.23.5" pytest-socket = "~0.6.0" From 9252e2399d5100bbbdba8f5b53f9ce61cac238b5 Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Thu, 10 Oct 2024 18:09:41 +1100 Subject: [PATCH 44/58] Modified has_permission function so that any users can add a score set to a public experiment. Add some related tests. 
--- src/mavedb/lib/permissions.py | 17 +++--- src/mavedb/routers/score_sets.py | 1 - tests/routers/test_experiments.py | 92 +++++++++++++++++++++++++++++++ tests/routers/test_score_set.py | 84 ++++++++++++++++++++++++++++ 4 files changed, 184 insertions(+), 10 deletions(-) diff --git a/src/mavedb/lib/permissions.py b/src/mavedb/lib/permissions.py index db3b05e1..cb0e1931 100644 --- a/src/mavedb/lib/permissions.py +++ b/src/mavedb/lib/permissions.py @@ -177,15 +177,14 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) -> else: return PermissionResponse(False) elif action == Action.ADD_SCORE_SET: - return PermissionResponse( - (user_may_edit or roles_permitted(active_roles, [UserRole.admin])), - 404 if private else 403, - ( - f"experiment with URN '{item.urn}' not found" - if private - else f"insufficient permissions for URN '{item.urn}'" - ), - ) + if user_may_edit or roles_permitted(active_roles, [UserRole.admin]): + return PermissionResponse(True) + elif private: + return PermissionResponse(False, 404, f"experiment with URN '{item.urn}' not found") + elif user_data is not None: + return PermissionResponse(True) + else: + return PermissionResponse(False, 403, f"insufficient permissions for URN '{item.urn}'") else: raise NotImplementedError(f"has_permission(User, Experiment, {action}, Role)") diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index 44fd2a30..0a7edd37 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -335,7 +335,6 @@ async def create_score_set( raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Unknown experiment") save_to_logging_context({"experiment": experiment.urn}) - assert_permission(user_data, experiment, Action.UPDATE) assert_permission(user_data, experiment, Action.ADD_SCORE_SET) license_ = db.query(License).filter(License.id == item_create.license_id).one_or_none() diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index 51c29073..04186b59 100644 --- a/tests/routers/test_experiments.py +++ b/tests/routers/test_experiments.py @@ -1181,3 +1181,95 @@ def test_admin_can_delete_other_users_published_experiment( del_response = client.delete(f"/api/v1/experiments/{experiment['urn']}") assert del_response.status_code == 200 + + +def test_can_add_experiment_to_own_private_experiment_set(session, client, setup_router_db): + experiment = create_experiment(client) + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": experiment["experimentSetUrn"]}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 200 + + +def test_can_add_experiment_to_own_public_experiment_set(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + published_score_set = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish").json() + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": published_score_set["experiment"]["experimentSetUrn"]}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 200 + + +def test_contributor_can_add_experiment_to_others_private_experiment_set(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, 
experiment["urn"], ExperimentDbModel) + change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) + add_contributor( + session, + experiment["experimentSetUrn"], + ExperimentSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": experiment["experimentSetUrn"]}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 200 + + +def test_contributor_can_add_experiment_to_others_public_experiment_set(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + published_score_set = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish").json() + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) + change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel) + change_ownership(session, published_score_set["experiment"]["experimentSetUrn"], ExperimentSetDbModel) + add_contributor( + session, + published_score_set["experiment"]["experimentSetUrn"], + ExperimentSetDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": published_score_set["experiment"]["experimentSetUrn"]}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 200 + + +def test_cannot_add_experiment_to_others_private_experiment_set(session, client, setup_router_db): + experiment = create_experiment(client) + experiment_set_urn = experiment["experimentSetUrn"] + change_ownership(session, experiment["urn"], ExperimentDbModel) + change_ownership(session, experiment_set_urn, ExperimentSetDbModel) + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": experiment_set_urn}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 404 + response_data = response.json() + assert f"experiment set with URN '{experiment_set_urn}' not found" in response_data["detail"] + + +def test_cannot_add_experiment_to_others_public_experiment_set(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + published_score_set = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish").json() + experiment_set_urn = published_score_set["experiment"]["experimentSetUrn"] + change_ownership(session, published_score_set["urn"], ScoreSetDbModel) + change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel) + change_ownership(session, experiment_set_urn, ExperimentSetDbModel) + test_experiment = deepcopy(TEST_MINIMAL_EXPERIMENT) + test_experiment.update({"experimentSetUrn": experiment_set_urn}) + response = client.post("/api/v1/experiments/", json=test_experiment) + assert response.status_code == 403 + response_data = response.json() + assert f"insufficient permissions for URN '{experiment_set_urn}'" in response_data["detail"] diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index b9cbc5b5..a9da3cd2 100644 --- 
a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -8,6 +8,7 @@ from arq import ArqRedis from mavedb.lib.validation.urn_re import MAVEDB_TMP_URN_RE from mavedb.models.enums.processing_state import ProcessingState +from mavedb.models.experiment import Experiment as ExperimentDbModel from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.view_models.orcid import OrcidUser from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate @@ -1088,3 +1089,86 @@ def test_admin_can_delete_other_users_published_scoreset( del_response = client.delete(f"/api/v1/score-sets/{response_data['urn']}") assert del_response.status_code == 200 + + +def test_can_add_score_set_to_own_private_experiment(session, client, setup_router_db): + experiment = create_experiment(client) + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + score_set_post_payload["experimentUrn"] = experiment["urn"] + response = client.post("/api/v1/score-sets/", json=score_set_post_payload) + assert response.status_code == 200 + + +def test_cannot_add_score_set_to_others_private_experiment(session, client, setup_router_db): + experiment = create_experiment(client) + experiment_urn = experiment["urn"] + change_ownership(session, experiment_urn, ExperimentDbModel) + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + score_set_post_payload["experimentUrn"] = experiment_urn + response = client.post("/api/v1/score-sets/", json=score_set_post_payload) + assert response.status_code == 404 + response_data = response.json() + assert f"experiment with URN '{experiment_urn}' not found" in response_data["detail"] + + +def test_can_add_score_set_to_own_public_experiment(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set_1 = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + pub_score_set_1 = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish").json() + score_set_2 = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + score_set_2["experimentUrn"] = pub_score_set_1["experiment"]["urn"] + response = client.post("/api/v1/score-sets/", json=score_set_2) + assert response.status_code == 200 + + +def test_can_add_score_set_to_others_public_experiment(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set_1 = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + pub_score_set_1 = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish").json() + change_ownership(session, pub_score_set_1["experiment"]["urn"], ExperimentDbModel) + score_set_2 = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + score_set_2["experimentUrn"] = pub_score_set_1["experiment"]["urn"] + response = client.post("/api/v1/score-sets/", json=score_set_2) + assert response.status_code == 200 + + +def test_contributor_can_add_score_set_to_others_private_experiment(session, client, setup_router_db): + experiment = create_experiment(client) + change_ownership(session, experiment["urn"], ExperimentDbModel) + add_contributor( + session, + experiment["urn"], + ExperimentDbModel, + TEST_USER["username"], + TEST_USER["first_name"], + TEST_USER["last_name"], + ) + score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) + score_set_post_payload["experimentUrn"] = experiment["urn"] + response = client.post("/api/v1/score-sets/", json=score_set_post_payload) + assert 
response.status_code == 200
+
+
+def test_contributor_can_add_score_set_to_others_public_experiment(session, data_provider, client, setup_router_db, data_files):
+    experiment = create_experiment(client)
+    score_set = create_seq_score_set_with_variants(
+        client, session, data_provider, experiment["urn"], data_files / "scores.csv"
+    )
+    published_score_set = client.post(f"/api/v1/score-sets/{score_set['urn']}/publish").json()
+    change_ownership(session, published_score_set["experiment"]["urn"], ExperimentDbModel)
+    add_contributor(
+        session,
+        published_score_set["experiment"]["urn"],
+        ExperimentDbModel,
+        TEST_USER["username"],
+        TEST_USER["first_name"],
+        TEST_USER["last_name"],
+    )
+    score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET)
+    score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"]
+    response = client.post("/api/v1/score-sets/", json=score_set_post_payload)
+    assert response.status_code == 200
\ No newline at end of file

From 4c87f74c1fb86c2c2985caf4e679435413132d5b Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Fri, 11 Oct 2024 12:00:07 +1100
Subject: [PATCH 45/58] Add two comments.

---
 src/mavedb/lib/permissions.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/mavedb/lib/permissions.py b/src/mavedb/lib/permissions.py
index cb0e1931..4b2f2715 100644
--- a/src/mavedb/lib/permissions.py
+++ b/src/mavedb/lib/permissions.py
@@ -125,6 +125,7 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) ->
         else:
             return PermissionResponse(False)
     elif action == Action.ADD_EXPERIMENT:
+        # Only permitted users can add an experiment to an existing experiment set.
         return PermissionResponse(
             user_may_edit or roles_permitted(active_roles, [UserRole.admin]),
             404 if private else 403,
@@ -177,10 +178,12 @@ def has_permission(user_data: Optional[UserData], item: Base, action: Action) ->
         else:
             return PermissionResponse(False)
     elif action == Action.ADD_SCORE_SET:
+        # Only permitted users can add a score set to a private experiment.
         if user_may_edit or roles_permitted(active_roles, [UserRole.admin]):
             return PermissionResponse(True)
         elif private:
             return PermissionResponse(False, 404, f"experiment with URN '{item.urn}' not found")
+        # Any signed-in user has permission to add a score set to a public experiment.
         elif user_data is not None:
             return PermissionResponse(True)
         else:

From 6f7892d05521c292338d0bbea1a07f823f8dc5cc Mon Sep 17 00:00:00 2001
From: EstelleDa
Date: Fri, 11 Oct 2024 16:47:31 +1100
Subject: [PATCH 46/58] Change function name. Change related test names and
 add a few more tests based on the new has_permission function.
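With patches 44 and 45 applied, `has_permission` resolves `Action.ADD_SCORE_SET` through a four-way decision. As a reading aid, here is that flow restated as a standalone sketch: `PermissionResponse` mirrors the class used in `src/mavedb/lib/permissions.py`, while `may_add_score_set` and its boolean parameters are invented for illustration and are not the production signature.

```python
from typing import Optional


class PermissionResponse:
    # Same shape as the class used by has_permission(): permitted flag,
    # optional HTTP status code, optional detail message.
    def __init__(self, permitted: bool, http_code: Optional[int] = None, message: Optional[str] = None):
        self.permitted = permitted
        self.http_code = http_code
        self.message = message


def may_add_score_set(user_may_edit: bool, is_admin: bool, private: bool, signed_in: bool, urn: str) -> PermissionResponse:
    if user_may_edit or is_admin:
        return PermissionResponse(True)
    if private:
        # 404 rather than 403, so the existence of a private experiment is not leaked.
        return PermissionResponse(False, 404, f"experiment with URN '{urn}' not found")
    if signed_in:
        # Any signed-in user may add a score set to a public experiment.
        return PermissionResponse(True)
    return PermissionResponse(False, 403, f"insufficient permissions for URN '{urn}'")
```

Answering 404 for private experiments instead of 403 is deliberate: it avoids confirming that a hidden URN exists, which is why the tests above expect different status codes for the private and public cases.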
--- src/mavedb/routers/permissions.py | 4 +- tests/routers/test_permissions.py | 77 ++++++++++++++++++------------- 2 files changed, 48 insertions(+), 33 deletions(-) diff --git a/src/mavedb/routers/permissions.py b/src/mavedb/routers/permissions.py index 8d9a17b2..21cfe3ef 100644 --- a/src/mavedb/routers/permissions.py +++ b/src/mavedb/routers/permissions.py @@ -35,9 +35,9 @@ class ModelName(str, Enum): status_code=200, response_model=bool ) -async def check_authorization( +async def check_permission( *, - model_name: str, + model_name: ModelName, urn: str, action: Action, db: Session = Depends(deps.get_db), diff --git a/tests/routers/test_permissions.py b/tests/routers/test_permissions.py index 2ab55934..e3bcf79f 100644 --- a/tests/routers/test_permissions.py +++ b/tests/routers/test_permissions.py @@ -4,6 +4,7 @@ change_ownership, create_experiment, create_seq_score_set, + create_seq_score_set_with_variants, ) from mavedb.models.experiment import Experiment as ExperimentDbModel from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel @@ -12,7 +13,7 @@ # Test check_authorization function # Experiment set tests -def test_get_true_authorization_from_own_experiment_set_add_experiment_check(client, setup_router_db): +def test_get_true_permission_from_own_experiment_set_add_experiment_check(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") @@ -20,7 +21,7 @@ def test_get_true_authorization_from_own_experiment_set_add_experiment_check(cli assert response.json() == True -def test_contributor_gets_true_authorization_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) @@ -38,7 +39,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_set_add_expe assert response.json() == True -def test_get_false_authorization_from_other_users_experiment_set_add_experiment_check(session, client, setup_router_db): +def test_get_false_permission_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) @@ -49,7 +50,7 @@ def test_get_false_authorization_from_other_users_experiment_set_add_experiment_ assert response.json() == False -def test_cannot_get_authorization_with_wrong_action_in_experiment_set(client, setup_router_db): +def test_cannot_get_permission_with_wrong_action_in_experiment_set(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/edit") @@ -60,7 +61,7 @@ def test_cannot_get_authorization_with_wrong_action_in_experiment_set(client, se " 'add_role', 'publish'" -def test_cannot_get_authorization_with_non_existing_experiment_set(client, setup_router_db): +def test_cannot_get_permission_with_non_existing_experiment_set(client, setup_router_db): response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/invalidUrn/update") assert response.status_code == 
404 @@ -69,7 +70,7 @@ def test_cannot_get_authorization_with_non_existing_experiment_set(client, setup # Experiment tests -def test_get_true_authorization_from_own_experiment_update_check(client, setup_router_db): +def test_get_true_permission_from_own_experiment_update_check(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") @@ -77,7 +78,7 @@ def test_get_true_authorization_from_own_experiment_update_check(client, setup_r assert response.json() == True -def test_get_true_authorization_from_own_experiment_delete_check(client, setup_router_db): +def test_get_true_permission_from_own_experiment_delete_check(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") @@ -85,7 +86,7 @@ def test_get_true_authorization_from_own_experiment_delete_check(client, setup_r assert response.json() == True -def test_get_true_authorization_from_own_experiment_add_score_set_check(client, setup_router_db): +def test_get_true_permission_from_own_experiment_add_score_set_check(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") @@ -93,7 +94,7 @@ def test_get_true_authorization_from_own_experiment_add_score_set_check(client, assert response.json() == True -def test_contributor_gets_true_authorization_from_others_experiment_update_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_experiment_update_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) add_contributor( @@ -110,7 +111,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_update_check assert response.json() == True -def test_contributor_gets_true_authorization_from_others_experiment_delete_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_experiment_delete_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) add_contributor( @@ -127,7 +128,7 @@ def test_contributor_gets_true_authorization_from_others_experiment_delete_check assert response.json() == True -def test_contributor_gets_true_authorization_from_others_experiment_add_score_set_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_private_experiment_add_score_set_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) add_contributor( @@ -144,18 +145,31 @@ def test_contributor_gets_true_authorization_from_others_experiment_add_score_se assert response.json() == True -# TODO: This one has problem. 
Need to fix has_permission function from lib/permissions.py first -def test_get_true_authorization_from_other_users_experiment_add_score_set_check(session, client, setup_router_db): +def test_get_false_permission_from_others_private_experiment_add_score_set_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") + assert response.status_code == 200 + assert response.json() == False + + +def test_get_true_permission_from_others_public_experiment_add_score_set_check(session, data_provider, client, setup_router_db, data_files): + experiment = create_experiment(client) + score_set_1 = create_seq_score_set_with_variants( + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) + pub_score_set = client.post(f"/api/v1/score-sets/{score_set_1['urn']}/publish").json() + pub_experiment_urn = pub_score_set["experiment"]["urn"] + change_ownership(session, pub_experiment_urn, ExperimentDbModel) + response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{pub_experiment_urn}/add_score_set") + assert response.status_code == 200 assert response.json() == True -def test_get_false_authorization_from_other_users_experiment_update_check(session, client, setup_router_db): +def test_get_false_permission_from_others_experiment_update_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) @@ -165,7 +179,7 @@ def test_get_false_authorization_from_other_users_experiment_update_check(sessio assert response.json() == False -def test_get_false_authorization_from_other_users_experiment_delete_check(session, client, setup_router_db): +def test_get_false_permission_from_other_users_experiment_delete_check(session, client, setup_router_db): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) @@ -175,7 +189,7 @@ def test_get_false_authorization_from_other_users_experiment_delete_check(sessio assert response.json() == False -def test_cannot_get_authorization_with_wrong_action_in_experiment(client, setup_router_db): +def test_cannot_get_permission_with_wrong_action_in_experiment(client, setup_router_db): experiment = create_experiment(client) response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/invalidAction") @@ -186,7 +200,7 @@ def test_cannot_get_authorization_with_wrong_action_in_experiment(client, setup_ " 'add_role', 'publish'" -def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): +def test_cannot_get_permission_with_non_existing_experiment(client, setup_router_db): response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/invalidUrn/update") assert response.status_code == 404 @@ -195,7 +209,7 @@ def test_cannot_get_authorization_with_non_existing_experiment(client, setup_rou # Score set tests -def test_get_true_authorization_from_own_score_set_update_check(client, setup_router_db): +def test_get_true_permission_from_own_score_set_update_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") @@ -204,7 +218,7 @@ def test_get_true_authorization_from_own_score_set_update_check(client, 
setup_ro assert response.json() == True -def test_get_true_authorization_from_own_score_set_delete_check(client, setup_router_db): +def test_get_true_permission_from_own_score_set_delete_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") @@ -213,7 +227,7 @@ def test_get_true_authorization_from_own_score_set_delete_check(client, setup_ro assert response.json() == True -def test_get_true_authorization_from_own_score_set_publish_check(client, setup_router_db): +def test_get_true_permission_from_own_score_set_publish_check(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") @@ -222,7 +236,7 @@ def test_get_true_authorization_from_own_score_set_publish_check(client, setup_r assert response.json() == True -def test_contributor_gets_true_authorization_from_others_score_set_update_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_score_set_update_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -240,7 +254,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_update_check( assert response.json() == True -def test_contributor_gets_true_authorization_from_others_score_set_delete_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_score_set_delete_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -258,7 +272,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_delete_check( assert response.json() == True -def test_contributor_gets_true_authorization_from_others_score_set_publish_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_score_set_publish_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -276,7 +290,7 @@ def test_contributor_gets_true_authorization_from_others_score_set_publish_check assert response.json() == True -def test_get_false_authorization_from_other_users_score_set_delete_check(session, client, setup_router_db): +def test_get_false_permission_from_others_score_set_delete_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -287,7 +301,7 @@ def test_get_false_authorization_from_other_users_score_set_delete_check(session assert response.json() == False -def test_get_false_authorization_from_other_users_score_set_update_check(session, client, setup_router_db): +def test_get_false_permission_from_others_score_set_update_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -298,7 +312,7 @@ def 
test_get_false_authorization_from_other_users_score_set_update_check(session assert response.json() == False -def test_get_false_authorization_from_other_users_score_set_publish_check(session, client, setup_router_db): +def test_get_false_permission_from_others_score_set_publish_check(session, client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) change_ownership(session, score_set["urn"], ScoreSetDbModel) @@ -309,7 +323,7 @@ def test_get_false_authorization_from_other_users_score_set_publish_check(sessio assert response.json() == False -def test_cannot_get_authorization_with_wrong_action_in_score_set(client, setup_router_db): +def test_cannot_get_permission_with_wrong_action_in_score_set(client, setup_router_db): experiment = create_experiment(client) score_set = create_seq_score_set(client, experiment["urn"]) response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/invalidAction") @@ -321,7 +335,7 @@ def test_cannot_get_authorization_with_wrong_action_in_score_set(client, setup_r " 'add_role', 'publish'" -def test_cannot_get_authorization_with_non_existing_experiment(client, setup_router_db): +def test_cannot_get_permission_with_non_existing_experiment(client, setup_router_db): response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/invalidUrn/update") assert response.status_code == 404 @@ -330,9 +344,10 @@ def test_cannot_get_authorization_with_non_existing_experiment(client, setup_rou # Common invalid test -def test_cannot_get_authorization_with_non_existing_item(client, setup_router_db): +def test_cannot_get_permission_with_non_existing_item(client, setup_router_db): response = client.get(f"/api/v1/permissions/user-is-permitted/invalidModel/invalidUrn/update") - assert response.status_code == 404 + assert response.status_code == 422 response_data = response.json() - assert response_data["detail"] == "invalidModel with URN 'invalidUrn' not found" + assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: " \ + "'experiment', 'experiment-set', 'score-set'" From 010c229f4a387ca41826c6ada39d4a249b4e0527 Mon Sep 17 00:00:00 2001 From: EstelleDa Date: Fri, 11 Oct 2024 17:26:43 +1100 Subject: [PATCH 47/58] Change the error message. 
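Two small details in patches 46 and 47 explain the reworked assertions in `test_cannot_get_permission_with_non_existing_item`: typing the `model_name` path parameter as the `ModelName` enum lets FastAPI reject unknown models with a 422 before the handler runs, and interpolating `model_name.value` keeps the 404 detail string readable. A minimal sketch of the second point, assuming only the enum itself:

```python
from enum import Enum


class ModelName(str, Enum):
    experiment = "experiment"
    experiment_set = "experiment-set"
    score_set = "score-set"


urn = "invalidUrn"
m = ModelName.score_set

# On Python 3.11+ this prints "ModelName.score_set with URN 'invalidUrn' not found",
# because formatting an enum with a mixed-in type now includes the class name.
print(f"{m} with URN '{urn}' not found")

# `.value` always yields the raw token used in the URL path: "score-set ...".
print(f"{m.value} with URN '{urn}' not found")
```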
--- src/mavedb/routers/permissions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mavedb/routers/permissions.py b/src/mavedb/routers/permissions.py index 21cfe3ef..480621a1 100644 --- a/src/mavedb/routers/permissions.py +++ b/src/mavedb/routers/permissions.py @@ -62,4 +62,4 @@ async def check_permission( return permission else: logger.debug(msg="The requested resources does not exist.", extra=logging_context()) - raise HTTPException(status_code=404, detail=f"{model_name} with URN '{urn}' not found") + raise HTTPException(status_code=404, detail=f"{model_name.value} with URN '{urn}' not found") From 0d56ae28841a8b3eca0fa543aa55068bae8814cc Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Thu, 3 Oct 2024 14:46:34 -0700 Subject: [PATCH 48/58] Tests for Mapped Variants Executor --- src/mavedb/worker/jobs.py | 25 +- tests/conftest.py | 15 +- tests/helpers/constants.py | 94 +++---- tests/helpers/util.py | 4 + tests/worker/test_jobs.py | 510 ++++++++++++++++++++++++++++++++----- 5 files changed, 508 insertions(+), 140 deletions(-) diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index 39423648..3a03c4e8 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -375,15 +375,8 @@ async def map_variants_for_score_set( else: raise NonexistentMappingReferenceError() - target_gene = db.scalars( - select(TargetGene) - .join(ScoreSet) - .join(TargetSequence) - .where( - ScoreSet.id == score_set_id, - # TargetSequence.sequence == target_sequence, - ) - ).one() + # TODO(VariantEffect/dcd_mapping2#2): Handle variant mappings for score sets with more than 1 target. + target_gene = score_set.target_genes[0] excluded_pre_mapped_keys = {"sequence"} if computed_genomic_ref and mapped_genomic_ref: @@ -447,7 +440,7 @@ async def map_variants_for_score_set( ) db.add(mapped_variant) - if successful_mapped_variants == 0: + if successful_mapped_variants == 0 and mapped_scores: score_set.mapping_state = MappingState.failed score_set.mapping_errors = {"error_message": "All variants failed to map"} elif successful_mapped_variants < total_variants: @@ -500,8 +493,6 @@ async def map_variants_for_score_set( except Exception as backoff_e: score_set.mapping_state = MappingState.failed score_set.mapping_errors = {"error_message": "Encountered an internal server error during mapping"} - db.add(score_set) - db.commit() send_slack_message(backoff_e) logging_context = {**logging_context, **format_raised_exception_info_as_dict(backoff_e)} logger.critical( @@ -511,8 +502,6 @@ async def map_variants_for_score_set( else: if new_job_id and not max_retries_exceeded: score_set.mapping_state = MappingState.queued - db.add(score_set) - db.commit() logger.info( msg="After encountering an error while parsing mapped variants, another mapping job was queued.", extra=logging_context, @@ -520,8 +509,6 @@ async def map_variants_for_score_set( elif new_job_id is None and not max_retries_exceeded: score_set.mapping_state = MappingState.failed score_set.mapping_errors = {"error_message": "Encountered an internal server error during mapping"} - db.add(score_set) - db.commit() logger.error( msg="After encountering an error while parsing mapped variants, another mapping job was unable to be queued. 
This score set will not be mapped.", extra=logging_context, @@ -529,17 +516,17 @@ async def map_variants_for_score_set( else: score_set.mapping_state = MappingState.failed score_set.mapping_errors = {"error_message": "Encountered an internal server error during mapping"} - db.add(score_set) - db.commit() logger.error( msg="After encountering an error while parsing mapped variants, the maximum retries for this job were exceeded. This score set will not be mapped.", extra=logging_context, ) finally: + db.add(score_set) + db.commit() return {"success": False, "retried": (not max_retries_exceeded and new_job_id is not None)} ctx["state"][ctx["job_id"]] = logging_context.copy() - return {"success": True} + return {"success": True, "retried": False} async def variant_mapper_manager(ctx: dict, correlation_id: str, updater_id: int, attempt: int = 1) -> dict: diff --git a/tests/conftest.py b/tests/conftest.py index 954903d9..2588028f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -48,14 +48,14 @@ def session(postgresql): ) engine = create_engine(connection, echo=False, poolclass=NullPool) - session = sessionmaker(autocommit=False, autoflush=False, bind=engine) + session = sessionmaker(autocommit=False, autoflush=False, bind=engine)() Base.metadata.create_all(bind=engine) try: - yield session() + yield session finally: - Base.metadata.drop_all(bind=engine) + session.close() @pytest.fixture @@ -170,7 +170,14 @@ async def on_job(ctx): @pytest.fixture def standalone_worker_context(session, data_provider, arq_redis): - yield {"db": session, "hdp": data_provider, "state": {}, "job_id": "test_job", "redis": arq_redis, "pool": futures.ProcessPoolExecutor()} + yield { + "db": session, + "hdp": data_provider, + "state": {}, + "job_id": "test_job", + "redis": arq_redis, + "pool": futures.ProcessPoolExecutor(), + } @pytest.fixture() diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index f5ba6d28..d08559cc 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -1,4 +1,4 @@ -from datetime import date +from datetime import date, datetime from humps import camelize from mavedb.models.enums.processing_state import ProcessingState @@ -56,7 +56,7 @@ "is_first_login": True, } -TEST_DESCRIPTION = 'description' +TEST_DESCRIPTION = "description" ADMIN_USER_DECODED_JWT = { "sub": ADMIN_USER["username"], @@ -77,50 +77,40 @@ "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": "Description" + "description": "Description", }, { "key": "Variant Library Creation Method", "value": "In vitro construct library method", "special": False, - "description": "Description" - }, - { - "key": "Variant Library Creation Method", - "value": "Other", - "special": False, - "description": "Description" + "description": "Description", }, + {"key": "Variant Library Creation Method", "value": "Other", "special": False, "description": "Description"}, { "key": "Endogenous Locus Library Method System", "value": "SaCas9", "special": False, - "description": "Description" + "description": "Description", }, { "key": "Endogenous Locus Library Method Mechanism", "value": "Base editor", "special": False, - "description": "Description" + "description": "Description", }, { "key": "In Vitro Construct Library Method System", "value": "Oligo-directed mutagenic PCR", "special": False, - "description": "Description" + "description": "Description", }, { "key": "In Vitro Construct Library Method Mechanism", "value": "Native locus replacement", 
"special": False, - "description": "Description" + "description": "Description", }, - { - "key": "Delivery method", - "value": "Other", - "special": False, - "description": "Description" - } + {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"}, ] TEST_KEYWORDS = [ @@ -129,7 +119,7 @@ "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": "Description" + "description": "Description", }, }, { @@ -137,7 +127,7 @@ "key": "Endogenous Locus Library Method System", "value": "SaCas9", "special": False, - "description": "Description" + "description": "Description", }, }, { @@ -145,17 +135,12 @@ "key": "Endogenous Locus Library Method Mechanism", "value": "Base editor", "special": False, - "description": "Description" + "description": "Description", }, }, { - "keyword": { - "key": "Delivery method", - "value": "Other", - "special": False, - "description": "Description" - }, - "description": "Details of delivery method" + "keyword": {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"}, + "description": "Details of delivery method", }, ] @@ -166,15 +151,10 @@ "methodText": "Methods", "keywords": [ { - "keyword": { - "key": "Delivery method", - "value": "Other", - "special": False, - "description": "Description" - }, - "description": "Details of delivery method" + "keyword": {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"}, + "description": "Details of delivery method", }, - ] + ], } TEST_MINIMAL_EXPERIMENT = { @@ -232,15 +212,11 @@ "modificationDate": date.today().isoformat(), "scoreSetUrns": [], "contributors": [], - "keywords": [{ - "keyword": { - "key": "Delivery method", - "value": "Other", - "special": False, - "description": "Description" - }, - "description": "Details of delivery method" - }, + "keywords": [ + { + "keyword": {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"}, + "description": "Details of delivery method", + }, ], "doiIdentifiers": [], "primaryPublicationIdentifiers": [], @@ -276,18 +252,13 @@ "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": "Description" + "description": "Description", }, - "description": "Description" + "description": "Description", }, { - "keyword": { - "key": "Delivery method", - "value": "Other", - "special": False, - "description": "Description" - }, - "description": "Description" + "keyword": {"key": "Delivery method", "value": "Other", "special": False, "description": "Description"}, + "description": "Description", }, ], "doiIdentifiers": [], @@ -527,13 +498,18 @@ TEST_VARIANT_MAPPING_SCAFFOLD = { "metadata": {}, - "computed_reference_sequence": {"sequence_type": "dna", "sequence_id": "ga4gh:SQ.ref_test", "sequence": "ACGTTT"}, - "mapped_reference_sequence": { + "computed_genomic_reference_sequence": { + "sequence_type": "dna", + "sequence_id": "ga4gh:SQ.ref_test", + "sequence": "ACGTTT", + }, + "mapped_genomic_reference_sequence": { "sequence_type": "dna", "sequence_id": "ga4gh:SQ.map_test", "sequence_accessions": ["NC_000001.11"], }, "mapped_scores": [], "vrs_version": "2.0", - "api_version": "0.0.0", + "dcd_mapping_version": "pytest.0.0", + "mapped_date_utc": datetime.isoformat(datetime.now()), } diff --git a/tests/helpers/util.py b/tests/helpers/util.py index 7d8c9185..94fe0e8a 100644 --- a/tests/helpers/util.py +++ b/tests/helpers/util.py @@ -210,3 +210,7 @@ def 
mark_user_inactive(session, username): session.refresh(user) return user + + +async def awaitable_exception(): + return Exception() diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index 15f684b2..cdde4984 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -5,17 +5,22 @@ from requests import HTTPError from uuid import uuid4 from unittest.mock import patch +import requests_mock import arq.jobs import cdot.hgvs.dataproviders import jsonschema import pandas as pd import pytest +from arq import ArqRedis +from sqlalchemy import not_ +from mavedb.data_providers.services import VRSMap from mavedb.lib.mave.constants import HGVS_NT_COLUMN from mavedb.lib.score_sets import csv_data_to_df from mavedb.lib.validation.exceptions import ValidationError from mavedb.models.enums.processing_state import ProcessingState +from mavedb.models.enums.mapping_state import MappingState from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.models.variant import Variant from mavedb.models.mapped_variant import MappedVariant @@ -26,9 +31,11 @@ map_variants_for_score_set, variant_mapper_manager, MAPPING_QUEUE_NAME, + BACKOFF_LIMIT, ) from sqlalchemy import select +from tests.helpers.util import awaitable_exception from tests.helpers.constants import ( TEST_CDOT_TRANSCRIPT, TEST_MINIMAL_ACC_SCORESET, @@ -66,21 +73,29 @@ async def setup_records_and_files(async_client, data_files, input_score_set): return score_set["urn"], scores, counts -async def setup_records_files_and_variants(async_client, data_files, input_score_set, worker_ctx): - urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) +async def setup_records_files_and_variants(session, async_client, data_files, input_score_set, worker_ctx): + score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() # Patch CDOT `_get_transcript`, in the event this function is called on an accesssion based scoreset. 
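    # Pinning `_get_transcript` keeps these fixtures hermetic: it is the only call on the
    # CDOT REST data provider that would otherwise reach the network for accession-based
    # score sets. A minimal sketch of the same pattern, assuming only `unittest.mock` and
    # a hypothetical `Provider` class standing in for the real data provider:
    #
    #     from unittest.mock import patch
    #
    #     class Provider:
    #         def fetch(self, ac):
    #             raise RuntimeError("would hit the network")
    #
    #     with patch.object(Provider, "fetch", return_value={"id": "NM_001637.3"}) as mocked:
    #         assert Provider().fetch("NM_001637.3") == {"id": "NM_001637.3"}  # no I/O performed
    #         mocked.assert_called_once()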
- with patch.object(cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT): - score_set = await create_variants_for_score_set(worker_ctx, urn, 1, scores, counts) + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, + ): + result = await create_variants_for_score_set(worker_ctx, uuid4().hex, score_set.id, 1, scores, counts) + score_set_with_variants = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() + + assert result["success"] assert score_set.processing_state is ProcessingState.success - assert score_set.num_variants == 3 + assert score_set_with_variants.num_variants == 3 - return score_set + return score_set_with_variants async def setup_mapping_output(async_client, session, score_set, empty=False): - score_set_response = await async_client.get(f"/api/v1/score-set/{score_set.urn}") + score_set_response = await async_client.get(f"/api/v1/score-sets/{score_set.urn}") mapping_output = deepcopy(TEST_VARIANT_MAPPING_SCAFFOLD) mapping_output["metadata"] = score_set_response.json() @@ -126,7 +141,13 @@ async def setup_mapping_output(async_client, session, score_set, empty=False): ], ) async def test_create_variants_for_score_set_with_validation_error( - input_score_set, validation_error, setup_worker_db, async_client, standalone_worker_context, session, data_files + input_score_set, + validation_error, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() @@ -167,7 +188,12 @@ async def test_create_variants_for_score_set_with_validation_error( @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) async def test_create_variants_for_score_set_with_caught_exception( - input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files + input_score_set, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() @@ -196,7 +222,12 @@ async def test_create_variants_for_score_set_with_caught_exception( @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) async def test_create_variants_for_score_set_with_caught_base_exception( - input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files + input_score_set, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() @@ -224,13 +255,20 @@ async def test_create_variants_for_score_set_with_caught_base_exception( @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) async def test_create_variants_for_score_set_with_existing_variants( - input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files + 
input_score_set, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: success = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -250,7 +288,9 @@ async def test_create_variants_for_score_set_with_existing_variants( assert score_set.processing_state == ProcessingState.success with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: success = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -272,7 +312,12 @@ async def test_create_variants_for_score_set_with_existing_variants( @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) async def test_create_variants_for_score_set_with_existing_exceptions( - input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files + input_score_set, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() @@ -281,7 +326,9 @@ async def test_create_variants_for_score_set_with_existing_exceptions( # some exception will be raised no matter what in the async job. 
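    # `side_effect` is the lever here: unlike `return_value`, an exception passed as a
    # `side_effect` is raised the moment the mocked attribute is called, which guarantees
    # the failure regardless of where the DataFrame is first touched. A minimal sketch,
    # assuming only `unittest.mock`:
    #
    #     from unittest.mock import patch
    #
    #     class Boom(Exception):
    #         pass
    #
    #     class Repo:
    #         def load(self):
    #             return 1
    #
    #     with patch.object(Repo, "load", side_effect=Boom("forced")):
    #         try:
    #             Repo().load()
    #         except Boom as exc:
    #             assert str(exc) == "forced"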
with ( patch.object( - pd.DataFrame, "isnull", side_effect=ValidationError("Test Exception", triggers=["exc_1", "exc_2"]) + pd.DataFrame, + "isnull", + side_effect=ValidationError("Test Exception", triggers=["exc_1", "exc_2"]), ) as mocked_exc, ): success = await create_variants_for_score_set( @@ -295,10 +342,15 @@ async def test_create_variants_for_score_set_with_existing_exceptions( assert score_set.num_variants == 0 assert len(db_variants) == 0 assert score_set.processing_state == ProcessingState.failed - assert score_set.processing_errors == {"exception": "Test Exception", "detail": ["exc_1", "exc_2"]} + assert score_set.processing_errors == { + "exception": "Test Exception", + "detail": ["exc_1", "exc_2"], + } with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: success = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -326,13 +378,20 @@ async def test_create_variants_for_score_set_with_existing_exceptions( @pytest.mark.asyncio @pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) async def test_create_variants_for_score_set( - input_score_set, setup_worker_db, async_client, standalone_worker_context, session, data_files + input_score_set, + setup_worker_db, + async_client, + standalone_worker_context, + session, + data_files, ): score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() with patch.object( - cdot.hgvs.dataproviders.RESTDataProvider, "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: success = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts @@ -360,7 +419,6 @@ async def test_create_variants_for_score_set( # this assumption changes in the future, tests reflecting this difference in output should be added for accession based score sets. -@pytest.mark.skip @pytest.mark.asyncio async def test_create_mapped_variants_for_scoreset( setup_worker_db, @@ -370,11 +428,17 @@ async def test_create_mapped_variants_for_scoreset( data_files, ): score_set = await setup_records_files_and_variants( - async_client, data_files, TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, ) - # Do not await, we need a co-routine object to be the return value of our `run_in_executor` mock. - mapping_test_output_for_score_set = setup_mapping_output(async_client, session, score_set) + mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) + + async def dummy_mapping_job(): + return mapping_test_output_for_score_set # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -382,30 +446,40 @@ async def test_create_mapped_variants_for_scoreset( # # TODO: Does this work on non-unix based machines. # TODO: Is it even a safe operation to patch this event loop method? 
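    # The reason the mock's `return_value` must be a coroutine object: the worker awaits
    # whatever `run_in_executor` hands back, so a plain value would fail at the `await`.
    # A runnable sketch of the pattern (POSIX event loops; `fake_result` is illustrative):
    #
    #     import asyncio
    #     from unittest.mock import patch
    #
    #     async def fake_result():
    #         return {"mapped_scores": []}
    #
    #     async def main():
    #         loop = asyncio.get_running_loop()
    #         with patch.object(type(loop), "run_in_executor", return_value=fake_result()):
    #             out = await loop.run_in_executor(None, lambda: None)
    #         assert out == {"mapped_scores": []}
    #
    #     asyncio.run(main())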
- with patch.object(_UnixSelectorEventLoop, "run_in_executor", return_value=mapping_test_output_for_score_set): - await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.urn, 1) + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() mapped_variants_for_score_set = session.scalars( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert result["success"] + assert not result["retried"] assert len(mapped_variants_for_score_set) == score_set.num_variants + assert score_set.mapping_state == MappingState.complete + assert score_set.mapping_errors is None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() - -@pytest.mark.skip @pytest.mark.asyncio async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants( setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set = await setup_records_files_and_variants( - async_client, data_files, TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, ) - # Do not await, we need a co-routine object to be the return value of our `run_in_executor` mock. - mapping_test_output_for_score_set = setup_mapping_output(async_client, session, score_set) + mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) + + async def dummy_mapping_job(): + return mapping_test_output_for_score_set # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -413,7 +487,11 @@ async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants # # TODO: Does this work on non-unix based machines. # TODO: Is it even a safe operation to patch this event loop method? 
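    # The expectation encoded below: remapping keeps superseded rows but demotes them, so
    # pre-existing mapped variants end up with `current=False` while the fresh run is
    # inserted with `current=True`. The two populations are then separable with filters
    # like the ones used in the assertions that follow:
    #
    #     from sqlalchemy import not_, select
    #
    #     stale = select(MappedVariant).where(not_(MappedVariant.current))
    #     fresh = select(MappedVariant).where(MappedVariant.current)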
- with patch.object(_UnixSelectorEventLoop, "run_in_executor", return_value=mapping_test_output_for_score_set): + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ): existing_variant = session.scalars(select(Variant)).first() if not existing_variant: @@ -428,36 +506,246 @@ async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants mapped_date=date.today(), vrs_version="2.0", mapping_api_version="0.0.0", + current=True, ) ) session.commit() - await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.urn, 1) + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() mapped_variants_for_score_set = session.scalars( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() - assert len(mapped_variants_for_score_set) == score_set.num_variants + preexisting_variants = session.scalars( + select(MappedVariant) + .join(Variant) + .join(ScoreSetDbModel) + .filter(ScoreSetDbModel.urn == score_set.urn, not_(MappedVariant.current)) + ).all() + new_variants = session.scalars( + select(MappedVariant) + .join(Variant) + .join(ScoreSetDbModel) + .filter(ScoreSetDbModel.urn == score_set.urn, MappedVariant.current) + ).all() + assert result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == score_set.num_variants + 1 + assert len(preexisting_variants) == 1 + assert len(new_variants) == score_set.num_variants + assert score_set.mapping_state == MappingState.complete + assert score_set.mapping_errors is None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + +@pytest.mark.asyncio +async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_score_set_selection( + setup_worker_db, async_client, standalone_worker_context, session, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround + # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine + # object that sets up test mappingn output. + # + # TODO: Does this work on non-unix based machines? + # TODO: Is it even a safe operation to patch this event loop method? + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=awaitable_exception(), + ): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id + 5, 1) + + # TODO: How are errors persisted? Test persistence mechanism. + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() + mapped_variants_for_score_set = session.scalars( + select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) + ).all() + + assert not result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + # When we cannot fetch a score set, these fields are unable to be updated. 
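+    # A sketch (not the exact worker code) of why nothing is written here: the job looks
+    # the score set up before doing any work, and a nonexistent id short-circuits the run
+    # before any state can be flipped to `failed`:
+    #
+    #     score_set = db.scalars(select(ScoreSet).where(ScoreSet.id == score_set_id)).one_or_none()
+    #     if score_set is None:
+    #         return {"success": False, "retried": False}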
+    assert score_set.mapping_state == MappingState.queued
+    assert score_set.mapping_errors is None
+
+
+@pytest.mark.asyncio
+async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_vrs_object(
+    setup_worker_db, async_client, standalone_worker_context, session, data_files
+):
+    score_set = await setup_records_files_and_variants(
+        session,
+        async_client,
+        data_files,
+        TEST_MINIMAL_SEQ_SCORESET,
+        standalone_worker_context,
+    )
+
+    with patch.object(
+        VRSMap,
+        "__init__",
+        return_value=Exception(),
+    ):
+        result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
+
+    # TODO: How are errors persisted? Test persistence mechanism.
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+
+    assert not result["success"]
+    assert not result["retried"]
+    assert len(mapped_variants_for_score_set) == 0
+    assert score_set.mapping_state == MappingState.failed
+    assert score_set.mapping_errors is not None


-@pytest.mark.skip
 @pytest.mark.asyncio
 async def test_create_mapped_variants_for_scoreset_mapping_exception(
     setup_worker_db, async_client, standalone_worker_context, session, data_files
 ):
-    async def awaitable_http_error():
-        raise HTTPError
+    score_set = await setup_records_files_and_variants(
+        session,
+        async_client,
+        data_files,
+        TEST_MINIMAL_SEQ_SCORESET,
+        standalone_worker_context,
+    )
+
+    # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround
+    # this limitation by instead patching the _UnixSelectorEventLoop's executor function, with a coroutine
+    # object that sets up test mapping output.
+    #
+    # TODO: Does this work on non-unix based machines?
+    # TODO: Is it even a safe operation to patch this event loop method?
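+    # Note the shape of the helper used below: `awaitable_exception()` is a coroutine that
+    # *returns* an Exception instance rather than raising one, which is all the worker needs
+    # to see in order to take its error path once the "mapping output" is inspected:
+    #
+    #     async def awaitable_exception():
+    #         return Exception()
+    #
+    #     async def demo():
+    #         result = await awaitable_exception()
+    #         assert isinstance(result, Exception)  # returned, not raised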
+ with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=awaitable_exception(), + ): + result = await map_variants_for_score_set( + standalone_worker_context, uuid4().hex, score_set.id, 1, BACKOFF_LIMIT + 1 + ) + + # TODO: How are errors persisted? Test persistence mechanism. + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() + mapped_variants_for_score_set = session.scalars( + select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) + ).all() + + assert not result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None + + +@pytest.mark.asyncio +async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_failed( + setup_worker_db, async_client, standalone_worker_context, session, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround + # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine + # object that sets up test mappingn output. + # + # TODO: Does this work on non-unix based machines. + # TODO: Is it even a safe operation to patch this event loop method? + with ( + patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=awaitable_exception(), + ), + patch.object(ArqRedis, "lpush", awaitable_exception()), + ): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) + + # TODO: How are errors persisted? Test persistence mechanism. + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() + mapped_variants_for_score_set = session.scalars( + select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) + ).all() + + assert not result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + # Behavior for exception in mapping is retried job + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None + + +@pytest.mark.asyncio +async def test_create_mapped_variants_for_scoreset_parsing_exception_with_retry( + setup_worker_db, async_client, standalone_worker_context, session, data_files +): score_set = await setup_records_files_and_variants( - async_client, data_files, TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, ) - # Do not await, we need a co-routine object which raises an http error once awaited. - mapping_test_output_for_score_set = awaitable_http_error() + mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) + + async def dummy_mapping_job(): + mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence") + return mapping_test_output_for_score_set # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -465,27 +753,128 @@ async def awaitable_http_error(): # # TODO: Does this work on non-unix based machines? 
# TODO: Is it even a safe operation to patch this event loop method? - with patch.object(_UnixSelectorEventLoop, "run_in_executor", return_value=mapping_test_output_for_score_set): - await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.urn, 1) + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) # TODO: How are errors persisted? Test persistence mechanism. + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() mapped_variants_for_score_set = session.scalars( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + + assert not result["success"] + assert result["retried"] assert len(mapped_variants_for_score_set) == 0 + assert score_set.mapping_state == MappingState.queued + assert score_set.mapping_errors is not None - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + +@pytest.mark.asyncio +async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_failed( + setup_worker_db, async_client, standalone_worker_context, session, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) + + async def dummy_mapping_job(): + mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence") + return mapping_test_output_for_score_set + + # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround + # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine + # object that sets up test mappingn output. + # + # TODO: Does this work on non-unix based machines? + # TODO: Is it even a safe operation to patch this event loop method? + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ), patch.object(ArqRedis, "lpush", awaitable_exception()): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) + + # TODO: How are errors persisted? Test persistence mechanism. 
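+    # Partial answer to the TODO above: persistence now happens in the worker's `finally:`
+    # block (see the jobs.py hunk earlier in this series), so every exit path commits
+    # whatever mapping state was set before it:
+    #
+    #     try:
+    #         ...
+    #     except Exception:
+    #         score_set.mapping_state = MappingState.failed
+    #     finally:
+    #         db.add(score_set)
+    #         db.commit()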
+ score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() + mapped_variants_for_score_set = session.scalars( + select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) + ).all() + + assert not result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + # Behavior for exception outside mapping is failed job + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None + + +@pytest.mark.asyncio +async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_limit_reached( + setup_worker_db, async_client, standalone_worker_context, session, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) + + async def dummy_mapping_job(): + mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence") + return mapping_test_output_for_score_set + + # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround + # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine + # object that sets up test mappingn output. + # + # TODO: Does this work on non-unix based machines? + # TODO: Is it even a safe operation to patch this event loop method? + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ): + result = await map_variants_for_score_set( + standalone_worker_context, uuid4().hex, score_set.id, 1, BACKOFF_LIMIT + 1 + ) + + # TODO: How are errors persisted? Test persistence mechanism. + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() + mapped_variants_for_score_set = session.scalars( + select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) + ).all() + + assert not result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + # Behavior for exception outside mapping is failed job + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None -@pytest.mark.skip @pytest.mark.asyncio async def test_create_mapped_variants_for_scoreset_no_mapping_output( setup_worker_db, async_client, standalone_worker_context, session, data_files ): score_set = await setup_records_files_and_variants( - async_client, data_files, TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, ) # Do not await, we need a co-routine object to be the return value of our `run_in_executor` mock. @@ -497,17 +886,22 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output( # # TODO: Does this work on non-unix based machines. # TODO: Is it even a safe operation to patch this event loop method? 
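+    # The trailing argument in the call below is the attempt counter. Passing
+    # BACKOFF_LIMIT + 1 models a job whose retries are already exhausted, so the worker
+    # should refuse to re-queue and land in the terminal failed state:
+    #
+    #     result = await map_variants_for_score_set(ctx, correlation_id, score_set.id, updater_id, BACKOFF_LIMIT + 1)
+    #     assert result == {"success": False, "retried": False}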
- with patch.object(_UnixSelectorEventLoop, "run_in_executor", return_value=mapping_test_output_for_score_set): - await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.urn, 1) + with patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=mapping_test_output_for_score_set, + ): + result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one() mapped_variants_for_score_set = session.scalars( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() - assert len(mapped_variants_for_score_set) == 0 - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + assert result["success"] + assert not result["retried"] + assert len(mapped_variants_for_score_set) == 0 + assert score_set.mapping_state == MappingState.failed @pytest.mark.skip From d5f6f8e8edd118ef22f2a1ab81db1caa6fda193b Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Fri, 4 Oct 2024 10:04:54 -0700 Subject: [PATCH 49/58] Add Assertions for Queue Length and In-Progress Jobs --- tests/worker/test_jobs.py | 61 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index cdde4984..eeaa3e2d 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -31,6 +31,7 @@ map_variants_for_score_set, variant_mapper_manager, MAPPING_QUEUE_NAME, + MAPPING_CURRENT_ID_NAME, BACKOFF_LIMIT, ) from sqlalchemy import select @@ -94,6 +95,11 @@ async def setup_records_files_and_variants(session, async_client, data_files, in return score_set_with_variants +async def sanitize_mapping_queue(standalone_worker_context, score_set): + queued_job = await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME) + assert int(queued_job.decode("utf-8")) == score_set.id + + async def setup_mapping_output(async_client, session, score_set, empty=False): score_set_response = await async_client.get(f"/api/v1/score-sets/{score_set.urn}") @@ -434,6 +440,9 @@ async def test_create_mapped_variants_for_scoreset( TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) @@ -457,6 +466,8 @@ async def dummy_mapping_job(): mapped_variants_for_score_set = session.scalars( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == score_set.num_variants @@ -475,6 +486,9 @@ async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. 
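+    # Background on the helper: publishing variants pushes the score set id onto
+    # MAPPING_QUEUE_NAME, and the manager job is normally what pops it before starting a
+    # mapping run. Since these tests call the mapper directly, the helper drains the
+    # entry by hand (sketch of `sanitize_mapping_queue`, defined above):
+    #
+    #     queued_job = await redis.rpop(MAPPING_QUEUE_NAME)
+    #     assert int(queued_job.decode("utf-8")) == score_set.id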
+ await sanitize_mapping_queue(standalone_worker_context, score_set) mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) @@ -529,6 +543,8 @@ async def dummy_mapping_job(): .join(ScoreSetDbModel) .filter(ScoreSetDbModel.urn == score_set.urn, MappedVariant.current) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == score_set.num_variants + 1 @@ -549,6 +565,9 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_sc TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -569,6 +588,8 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_sc select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -588,6 +609,9 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_vr TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) with patch.object( VRSMap, @@ -602,6 +626,8 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_vr select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -620,6 +646,9 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception( TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) # We seem unable to mock requests via requests_mock that occur inside another event loop. 
Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -640,6 +669,8 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -658,6 +689,9 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_limit TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -680,6 +714,8 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_limit select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -698,6 +734,9 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_faile TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine @@ -721,6 +760,8 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_faile select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -740,6 +781,9 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_with_retry( TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. 
+ await sanitize_mapping_queue(standalone_worker_context, score_set) mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) @@ -766,6 +810,8 @@ async def dummy_mapping_job(): select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -784,6 +830,9 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_faile TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) @@ -810,6 +859,8 @@ async def dummy_mapping_job(): select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -829,6 +880,9 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_limit TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set) @@ -857,6 +911,8 @@ async def dummy_mapping_job(): select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert not result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 @@ -876,6 +932,9 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output( TEST_MINIMAL_SEQ_SCORESET, standalone_worker_context, ) + # We are skipping the manager in these cases to test directly on the mapping job, + # so pop from the queue as if the manager had executed. + await sanitize_mapping_queue(standalone_worker_context, score_set) # Do not await, we need a co-routine object to be the return value of our `run_in_executor` mock. 
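    # With `empty=True` the scaffold ships `"mapped_scores": []`: the run completes as a
    # unit of work (`success` is True) but no variant can be recorded, and the score set
    # ends in MappingState.failed. For comparison, the zero-success guard in the worker
    # (simplified later in this series by "Remove Unnecessary Mapping Check") reads:
    #
    #     if successful_mapped_variants == 0:
    #         score_set.mapping_state = MappingState.failed
    #         score_set.mapping_errors = {"error_message": "All variants failed to map"}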
mapping_test_output_for_score_set = setup_mapping_output(async_client, session, score_set, empty=True) @@ -898,6 +957,8 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output( select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn) ).all() + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" assert result["success"] assert not result["retried"] assert len(mapped_variants_for_score_set) == 0 From eda1a08fa9cdea86427e58fc4c6a5c2442295ef1 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Mon, 7 Oct 2024 14:07:37 -0700 Subject: [PATCH 50/58] Remove IDs That Failed to be Queued --- src/mavedb/worker/jobs.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index 3a03c4e8..6353b36e 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -570,8 +570,17 @@ async def variant_mapper_manager(ctx: dict, correlation_id: str, updater_id: int except Exception as e: send_slack_message(e) + + # Attempt to remove this item from the mapping queue. + try: + await redis.lrem(MAPPING_QUEUE_NAME, 1, queued_id) # type: ignore + logger.warning(msg="Removed un-queueable score set from the queue.", extra=logging_context) + except Exception: + pass + logging_context = {**logging_context, **format_raised_exception_info_as_dict(e)} logger.error(msg="Variant mapper manager encountered an unexpected error during setup.", extra=logging_context) + return {"success": False, "enqueued_job": None} new_job = None @@ -633,6 +642,13 @@ async def variant_mapper_manager(ctx: dict, correlation_id: str, updater_id: int if not queued_score_set: return {"success": False, "enqueued_job": new_job_id} + # Attempt to remove this item from the mapping queue. + try: + await redis.lrem(MAPPING_QUEUE_NAME, 1, queued_id) # type: ignore + logger.warning(msg="Removed un-queueable score set from the queue.", extra=logging_context) + except Exception: + pass + score_set_exc = db.scalars(select(ScoreSet).where(ScoreSet.id == queued_score_set.id)).one_or_none() if score_set_exc: score_set_exc.mapping_state = MappingState.failed From 2080b9d509d96c50736c085c52b6698ecb501881 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Mon, 7 Oct 2024 14:07:54 -0700 Subject: [PATCH 51/58] Tests for Mapping Manager --- tests/worker/test_jobs.py | 314 +++++++++++++++++++++++++++++++++++--- 1 file changed, 294 insertions(+), 20 deletions(-) diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index eeaa3e2d..f746c036 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -965,39 +965,313 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output( assert score_set.mapping_state == MappingState.failed -@pytest.mark.skip @pytest.mark.asyncio -async def test_mapping_manager_empty_queue(setup_worker_db, standalone_worker_context, session): - queued_job = await variant_mapper_manager(standalone_worker_context) +async def test_mapping_manager_empty_queue(setup_worker_db, standalone_worker_context): + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) - # No new jobs should have been created if nothing is in the queue. - assert queued_job is None - session.commit() + # No new jobs should have been created if nothing is in the queue, and the queue should remain empty. 
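+    # The manager coordinates through two Redis keys: MAPPING_QUEUE_NAME, a list of score
+    # set ids awaiting mapping, and MAPPING_CURRENT_ID_NAME, the id currently being mapped
+    # (an empty string when idle). The tests that follow drive the second key directly:
+    #
+    #     await redis.set(MAPPING_CURRENT_ID_NAME, "")   # nothing in flight
+    #     await redis.set(MAPPING_CURRENT_ID_NAME, "5")  # score set 5 is mid-mapping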
+ assert result["enqueued_job"] is None + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" -@pytest.mark.skip @pytest.mark.asyncio -async def test_mapping_manager_occupied_queue_mapping_in_progress(setup_worker_db, standalone_worker_context, session): - await standalone_worker_context["redis"].lpush(MAPPING_QUEUE_NAME, "mavedb:test-urn") +async def test_mapping_manager_empty_queue_error_during_setup(setup_worker_db, standalone_worker_context): + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") + with patch.object(ArqRedis, "rpop", Exception()): + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + # No new jobs should have been created if nothing is in the queue, and the queue should remain empty. + assert result["enqueued_job"] is None + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "" + + +@pytest.mark.asyncio +async def test_mapping_manager_occupied_queue_mapping_in_progress( + setup_worker_db, standalone_worker_context, session, async_client, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "5") with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress): - queued_job = await variant_mapper_manager(standalone_worker_context) + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) - # Execution should be deferred if a job is in progress. - assert await queued_job.status() is arq.jobs.JobStatus.deferred - session.commit() + # Execution should be deferred if a job is in progress, and the queue should contain one entry which is the deferred ID. + assert result["enqueued_job"] is not None + assert ( + await arq.jobs.Job(result["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set.id) + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "5" + assert score_set.mapping_state == MappingState.queued + assert score_set.mapping_errors is None -@pytest.mark.skip @pytest.mark.asyncio async def test_mapping_manager_occupied_queue_mapping_not_in_progress( - setup_worker_db, standalone_worker_context, session + setup_worker_db, standalone_worker_context, session, async_client, data_files ): - await standalone_worker_context["redis"].lpush(MAPPING_QUEUE_NAME, "mavedb:test-urn") + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found): - queued_job = await variant_mapper_manager(standalone_worker_context) + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) - # VRS Mapping jobs have the same ID. 
- assert queued_job.job_id == "vrs_map" - session.commit() + # Mapping job should be queued if none is currently running, and the queue should now be empty. + assert result["enqueued_job"] is not None + assert ( + await arq.jobs.Job(result["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.queued + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + # We don't actually start processing these score sets. + assert score_set.mapping_state == MappingState.queued + assert score_set.mapping_errors is None + + +@pytest.mark.asyncio +async def test_mapping_manager_occupied_queue_mapping_in_progress_error_during_enqueue( + setup_worker_db, standalone_worker_context, session, async_client, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "5") + with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress), patch.object( + ArqRedis, "enqueue_job", return_value=awaitable_exception() + ): + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + + # Execution should be deferred if a job is in progress, and the queue should contain one entry which is the deferred ID. + assert result["enqueued_job"] is None + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert (await standalone_worker_context["redis"].get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == "5" + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None + + +@pytest.mark.asyncio +async def test_mapping_manager_occupied_queue_mapping_not_in_progress_error_during_enqueue( + setup_worker_db, standalone_worker_context, session, async_client, data_files +): + score_set = await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") + with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found), patch.object( + ArqRedis, "enqueue_job", return_value=awaitable_exception() + ): + result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + + # Enqueue would have failed, the job is unsuccessful, and we remove the queued item. 
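+    # The cleanup asserted here was added to the manager earlier in this series: when a
+    # score set cannot be enqueued, its id is pruned from the Redis list so it is not
+    # retried forever. The relevant (best-effort) path:
+    #
+    #     try:
+    #         await redis.lrem(MAPPING_QUEUE_NAME, 1, queued_id)
+    #         logger.warning(msg="Removed un-queueable score set from the queue.", extra=logging_context)
+    #     except Exception:
+    #         pass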
+ assert result["enqueued_job"] is None + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 + assert score_set.mapping_state == MappingState.failed + assert score_set.mapping_errors is not None + + +@pytest.mark.asyncio +async def test_mapping_manager_multiple_score_sets_occupy_queue_mapping_in_progress( + setup_worker_db, standalone_worker_context, session, async_client, data_files +): + score_set_id_1 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + score_set_id_2 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + score_set_id_3 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "5") + with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress): + result1 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + result2 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + result3 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + + # All three jobs should complete successfully... + assert result1["success"] + assert result2["success"] + assert result3["success"] + + # ...with a new job enqueued... + assert result1["enqueued_job"] is not None + assert result2["enqueued_job"] is not None + assert result3["enqueued_job"] is not None + + # ...of which all should be deferred jobs of the "variant_mapper_manager" variety... + assert ( + await arq.jobs.Job(result1["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + assert ( + await arq.jobs.Job(result2["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + assert ( + await arq.jobs.Job(result3["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + + assert ( + await arq.jobs.Job(result1["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "variant_mapper_manager" + assert ( + await arq.jobs.Job(result2["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "variant_mapper_manager" + assert ( + await arq.jobs.Job(result3["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "variant_mapper_manager" + + # ...and the queue state should have three jobs, each of our three created score sets. + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 3 + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set_id_1) + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set_id_2) + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set_id_3) + + score_set1 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_1)).one() + score_set2 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_2)).one() + score_set3 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_3)).one() + # Each score set should remain queued with no mapping errors. 
+ assert score_set1.mapping_state == MappingState.queued + assert score_set2.mapping_state == MappingState.queued + assert score_set3.mapping_state == MappingState.queued + assert score_set1.mapping_errors is None + assert score_set2.mapping_errors is None + assert score_set3.mapping_errors is None + + +@pytest.mark.asyncio +async def test_mapping_manager_multiple_score_sets_occupy_queue_mapping_not_in_progress( + setup_worker_db, standalone_worker_context, session, async_client, data_files +): + score_set_id_1 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + score_set_id_2 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + score_set_id_3 = ( + await setup_records_files_and_variants( + session, + async_client, + data_files, + TEST_MINIMAL_SEQ_SCORESET, + standalone_worker_context, + ) + ).id + + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "") + with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found): + result1 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + + # Mock the first job being in-progress + await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, str(score_set_id_1)) + with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress): + result2 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + result3 = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1) + + # All three jobs should complete successfully... + assert result1["success"] + assert result2["success"] + assert result3["success"] + + # ...with a new job enqueued... + assert result1["enqueued_job"] is not None + assert result2["enqueued_job"] is not None + assert result3["enqueued_job"] is not None + + # ...of which the first should be a queued job of the "map_variants_for_score_set" variety and the other two should be + # deferred jobs of the "variant_mapper_manager" variety... + assert ( + await arq.jobs.Job(result1["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.queued + assert ( + await arq.jobs.Job(result2["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + assert ( + await arq.jobs.Job(result3["enqueued_job"], standalone_worker_context["redis"]).status() + ) == arq.jobs.JobStatus.deferred + + assert ( + await arq.jobs.Job(result1["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "map_variants_for_score_set" + assert ( + await arq.jobs.Job(result2["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "variant_mapper_manager" + assert ( + await arq.jobs.Job(result3["enqueued_job"], standalone_worker_context["redis"]).info() + ).function == "variant_mapper_manager" + + # ...and the queue state should have two jobs, neither of which should be the first score set. 
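The rpop assertions that follow depend on Redis list semantics: lpush prepends at the head and rpop drains from the tail, so items come back out in insertion order. A minimal standalone demonstration (assuming a local Redis and the redis-py asyncio client):

    import asyncio

    import redis.asyncio as aioredis

    async def main():
        r = aioredis.Redis()  # assumes Redis on localhost:6379
        await r.delete("demo_queue")
        for item in ("1", "2", "3"):
            await r.lpush("demo_queue", item)  # newest item sits at the head
        # rpop takes from the tail, yielding FIFO order: 1, then 2, then 3.
        print([await r.rpop("demo_queue") for _ in range(3)])  # [b'1', b'2', b'3']

    asyncio.run(main())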
+ assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 2 + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set_id_2) + assert (await standalone_worker_context["redis"].rpop(MAPPING_QUEUE_NAME)).decode("utf-8") == str(score_set_id_3) + + score_set1 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_1)).one() + score_set2 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_2)).one() + score_set3 = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.id == score_set_id_3)).one() + # We don't actually process any score sets in the manager job, and each should have no mapping errors. + assert score_set1.mapping_state == MappingState.queued + assert score_set2.mapping_state == MappingState.queued + assert score_set3.mapping_state == MappingState.queued + assert score_set1.mapping_errors is None + assert score_set2.mapping_errors is None + assert score_set3.mapping_errors is None From 17f3a405e8d2c1b762c24ea08b57bed8b5b675ba Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Fri, 11 Oct 2024 11:06:53 -0700 Subject: [PATCH 52/58] Remove Unnecessary Mapping Check --- src/mavedb/worker/jobs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index 6353b36e..8b8ae5b6 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -440,7 +440,7 @@ async def map_variants_for_score_set( ) db.add(mapped_variant) - if successful_mapped_variants == 0 and mapped_scores: + if successful_mapped_variants == 0: score_set.mapping_state = MappingState.failed score_set.mapping_errors = {"error_message": "All variants failed to map"} elif successful_mapped_variants < total_variants: From 3449e61cee2efc80776e9a1dcbdbe3c6566e6501 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Fri, 11 Oct 2024 13:12:05 -0700 Subject: [PATCH 53/58] Add Workflow Tests for Worker Jobs Adds tests that ensure workflow invocations succeed on the worker. These tests ensure that worker jobs which invoke other worker jobs are functioning correctly. --- tests/conftest.py | 2 + tests/worker/test_jobs.py | 374 +++++++++++++++++++++++++++----------- 2 files changed, 271 insertions(+), 105 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 2588028f..9d0bed5a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -151,6 +151,8 @@ async def on_startup(ctx): async def on_job(ctx): ctx["db"] = session ctx["hdp"] = data_provider + ctx["state"] = {} + ctx["pool"] = futures.ProcessPoolExecutor() worker_ = Worker( functions=[create_variants_for_score_set, map_variants_for_score_set, variant_mapper_manager], diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index f746c036..525c160d 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -168,7 +168,7 @@ async def test_create_variants_for_score_set_with_validation_error( return_value=TEST_CDOT_TRANSCRIPT, ) as hdp, ): - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -185,10 +185,8 @@ async def test_create_variants_for_score_set_with_validation_error( assert len(db_variants) == 0 assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors == validation_error - - # Have to commit at the end of async tests for DB threads to be released. 
Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 @pytest.mark.asyncio @@ -207,7 +205,7 @@ async def test_create_variants_for_score_set_with_caught_exception( # This is somewhat dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some exception will be raised no matter what in the async job. with (patch.object(pd.DataFrame, "isnull", side_effect=Exception) as mocked_exc,): - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) mocked_exc.assert_called() @@ -219,10 +217,8 @@ async def test_create_variants_for_score_set_with_caught_exception( assert len(db_variants) == 0 assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors == {"detail": [], "exception": ""} - - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 @pytest.mark.asyncio @@ -241,7 +237,7 @@ async def test_create_variants_for_score_set_with_caught_base_exception( # This is somewhat (extra) dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some base exception will be handled no matter what in the async job. with (patch.object(pd.DataFrame, "isnull", side_effect=BaseException),): - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -252,10 +248,8 @@ async def test_create_variants_for_score_set_with_caught_base_exception( assert len(db_variants) == 0 assert score_set.processing_state == ProcessingState.failed assert score_set.processing_errors is None - - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. 
- session.commit() + assert not result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 0 @pytest.mark.asyncio @@ -276,7 +270,7 @@ async def test_create_variants_for_score_set_with_existing_variants( "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -286,6 +280,7 @@ async def test_create_variants_for_score_set_with_existing_variants( else: hdp.assert_called_once() + await sanitize_mapping_queue(standalone_worker_context, score_set) db_variants = session.scalars(select(Variant)).all() score_set = session.query(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set_urn).one() @@ -298,7 +293,7 @@ async def test_create_variants_for_score_set_with_existing_variants( "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -309,10 +304,8 @@ async def test_create_variants_for_score_set_with_existing_variants( assert len(db_variants) == 3 assert score_set.processing_state == ProcessingState.success assert score_set.processing_errors is None - - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 @pytest.mark.asyncio @@ -337,7 +330,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions( side_effect=ValidationError("Test Exception", triggers=["exc_1", "exc_2"]), ) as mocked_exc, ): - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) mocked_exc.assert_called() @@ -358,7 +351,7 @@ async def test_create_variants_for_score_set_with_existing_exceptions( "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -375,10 +368,8 @@ async def test_create_variants_for_score_set_with_existing_exceptions( assert len(db_variants) == 3 assert score_set.processing_state == ProcessingState.success assert score_set.processing_errors is None - - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. 
- session.commit() + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 @pytest.mark.asyncio @@ -399,7 +390,7 @@ async def test_create_variants_for_score_set( "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ) as hdp: - success = await create_variants_for_score_set( + result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -415,10 +406,99 @@ async def test_create_variants_for_score_set( assert score_set.num_variants == 3 assert len(db_variants) == 3 assert score_set.processing_state == ProcessingState.success + assert result["success"] + assert (await standalone_worker_context["redis"].llen(MAPPING_QUEUE_NAME)) == 1 + + +@pytest.mark.asyncio +@pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET)) +async def test_create_variants_for_score_set_enqueues_manager_and_successful_mapping( + input_score_set, + setup_worker_db, + session, + async_client, + data_files, + arq_worker, + arq_redis, +): + score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set) + score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one() - # Have to commit at the end of async tests for DB threads to be released. Otherwise pytest - # thinks we are still using the session fixture and will hang indefinitely. - session.commit() + async def dummy_mapping_job(): + return await setup_mapping_output(async_client, session, score_set) + + with patch.object( + cdot.hgvs.dataproviders.RESTDataProvider, + "_get_transcript", + return_value=TEST_CDOT_TRANSCRIPT, + ) as hdp, patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + return_value=dummy_mapping_job(), + ), patch( + "mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0 + ): + await arq_redis.enqueue_job("create_variants_for_score_set", uuid4().hex, score_set.id, 1, scores, counts) + await arq_worker.async_run() + await arq_worker.run_check() + + # Call data provider _get_transcript method if this is an accession based score set, otherwise do not. 
+    if all(["targetSequence" in target for target in input_score_set["targetGenes"]]):
+        hdp.assert_not_called()
+    else:
+        hdp.assert_called_once()
+
+    db_variants = session.scalars(select(Variant)).all()
+    score_set = session.query(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set_urn).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+
+    assert score_set.num_variants == 3
+    assert len(db_variants) == 3
+    assert score_set.processing_state == ProcessingState.success
+    assert (await arq_redis.llen(MAPPING_QUEUE_NAME)) == 0
+    assert (await arq_redis.get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == ""
+    assert len(mapped_variants_for_score_set) == score_set.num_variants
+    assert score_set.mapping_state == MappingState.complete
+    assert score_set.mapping_errors is None
+
+
+@pytest.mark.asyncio
+@pytest.mark.parametrize("input_score_set", (TEST_MINIMAL_SEQ_SCORESET, TEST_MINIMAL_ACC_SCORESET))
+async def test_create_variants_for_score_set_exception_skips_mapping(
+    input_score_set,
+    setup_worker_db,
+    session,
+    async_client,
+    data_files,
+    arq_worker,
+    arq_redis,
+):
+    score_set_urn, scores, counts = await setup_records_and_files(async_client, data_files, input_score_set)
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set_urn)).one()
+
+    with patch.object(pd.DataFrame, "isnull", side_effect=Exception) as mocked_exc:
+        await arq_redis.enqueue_job("create_variants_for_score_set", uuid4().hex, score_set.id, 1, scores, counts)
+        await arq_worker.async_run()
+        await arq_worker.run_check()
+
+    mocked_exc.assert_called()
+
+    db_variants = session.scalars(select(Variant)).all()
+    score_set = session.query(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set_urn).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+
+    assert score_set.num_variants == 0
+    assert len(db_variants) == 0
+    assert score_set.processing_state == ProcessingState.failed
+    assert score_set.processing_errors == {"detail": [], "exception": ""}
+    assert (await arq_redis.llen(MAPPING_QUEUE_NAME)) == 0
+    assert len(mapped_variants_for_score_set) == 0
+    assert score_set.mapping_state == MappingState.not_attempted
+    assert score_set.mapping_errors is None
 
 
 # NOTE: These tests operate under the assumption that mapping output is consistent between accession based and sequence based score sets. If
@@ -440,21 +520,17 @@ async def test_create_mapped_variants_for_scoreset(
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
-    mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
-
     async def dummy_mapping_job():
-        return mapping_test_output_for_score_set
+        return await setup_mapping_output(async_client, session, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
    # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
    # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines.
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -486,21 +562,17 @@ async def test_create_mapped_variants_for_scoreset_with_existing_mapped_variants
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
-    mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
-
     async def dummy_mapping_job():
-        return mapping_test_output_for_score_set
+        return await setup_mapping_output(async_client, session, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines.
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -565,16 +637,14 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_sc
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -582,7 +652,6 @@
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id + 5, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
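An aside on the run_in_executor patch used throughout these tests: because `await loop.run_in_executor(...)` simply awaits whatever the mocked method returns, handing the mock an un-awaited coroutine object makes the await resolve to that coroutine's result. A minimal, self-contained illustration of the trick (Unix-only, since it patches the selector event loop class CPython uses there):

    import asyncio
    from asyncio.unix_events import _UnixSelectorEventLoop
    from unittest.mock import patch

    async def fake_result():
        # Stands in for the mapping output the executor call would normally produce.
        return {"mapped_scores": []}

    async def main():
        loop = asyncio.get_running_loop()
        with patch.object(_UnixSelectorEventLoop, "run_in_executor", return_value=fake_result()):
            # The lambda is never called; the patched method returns our coroutine,
            # and awaiting it yields fake_result()'s return value.
            out = await loop.run_in_executor(None, lambda: "never called")
        print(out)  # {'mapped_scores': []}

    asyncio.run(main())

Note that a coroutine object can only be awaited once, which is why each test builds a fresh `dummy_mapping_job()` rather than sharing one across calls.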
    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -609,8 +678,9 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_vr
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     with patch.object(
@@ -620,7 +690,6 @@
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -646,16 +715,14 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception(
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -663,7 +730,6 @@
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -689,16 +755,14 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_limit
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue.
Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -708,7 +772,6 @@
     ):
         result = await map_variants_for_score_set(
             standalone_worker_context, uuid4().hex, score_set.id, 1, BACKOFF_LIMIT + 1
         )
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -734,16 +797,14 @@ async def test_create_mapped_variants_for_scoreset_mapping_exception_retry_faile
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines.
-    # TODO: Is it even a safe operation to patch this event loop method?
     with (
         patch.object(
             _UnixSelectorEventLoop,
             "run_in_executor",
@@ -754,7 +815,6 @@
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -781,22 +841,19 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_with_retry(
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
    await sanitize_mapping_queue(standalone_worker_context, score_set)
 
-    mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
-
     async def dummy_mapping_job():
+        mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
         mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence")
         return mapping_test_output_for_score_set
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -804,7 +861,6 @@ async def dummy_mapping_job():
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -830,22 +886,19 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_faile
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
-    mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
-
     async def dummy_mapping_job():
+        mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
         mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence")
         return mapping_test_output_for_score_set
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -853,7 +906,6 @@ async def dummy_mapping_job():
     ), patch.object(ArqRedis, "lpush", awaitable_exception()):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
-    # TODO: How are errors persisted? Test persistence mechanism.
    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -880,22 +932,19 @@ async def test_create_mapped_variants_for_scoreset_parsing_exception_retry_limit
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
-    mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
-
     async def dummy_mapping_job():
+        mapping_test_output_for_score_set = await setup_mapping_output(async_client, session, score_set)
         mapping_test_output_for_score_set.pop("computed_genomic_reference_sequence")
         return mapping_test_output_for_score_set
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines?
-    # TODO: Is it even a safe operation to patch this event loop method?
     with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
@@ -905,7 +954,6 @@ async def dummy_mapping_job():
     ):
         result = await map_variants_for_score_set(
             standalone_worker_context, uuid4().hex, score_set.id, 1, BACKOFF_LIMIT + 1
         )
 
-    # TODO: How are errors persisted? Test persistence mechanism.
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
     mapped_variants_for_score_set = session.scalars(
         select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
@@ -932,23 +980,22 @@ async def test_create_mapped_variants_for_scoreset_no_mapping_output(
         TEST_MINIMAL_SEQ_SCORESET,
         standalone_worker_context,
     )
-    # We are skipping the manager in these cases to test directly on the mapping job,
-    # so pop from the queue as if the manager had executed.
+    # The call to `create_variants_for_score_set` within the above `setup_records_files_and_variants` will
+    # add a score set to the queue. Since we are executing the mapping independent of the manager job, we should
+    # sanitize the queue as if the manager process had run.
     await sanitize_mapping_queue(standalone_worker_context, score_set)
 
     # Do not await, we need a co-routine object to be the return value of our `run_in_executor` mock.
-    mapping_test_output_for_score_set = setup_mapping_output(async_client, session, score_set, empty=True)
+    async def dummy_mapping_job():
+        return await setup_mapping_output(async_client, session, score_set, empty=True)
 
     # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
     # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
     # object that sets up test mapping output.
-    #
-    # TODO: Does this work on non-unix based machines.
-    # TODO: Is it even a safe operation to patch this event loop method?
    with patch.object(
         _UnixSelectorEventLoop,
         "run_in_executor",
-        return_value=mapping_test_output_for_score_set,
+        return_value=dummy_mapping_job(),
     ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
@@ -1275,3 +1322,120 @@ async def test_mapping_manager_multiple_score_sets_occupy_queue_mapping_not_in_p
     assert score_set1.mapping_errors is None
     assert score_set2.mapping_errors is None
     assert score_set3.mapping_errors is None
+
+
+@pytest.mark.asyncio
+async def test_mapping_manager_enqueues_mapping_process_with_successful_mapping(
+    setup_worker_db, standalone_worker_context, session, async_client, data_files, arq_worker, arq_redis
+):
+    score_set = await setup_records_files_and_variants(
+        session,
+        async_client,
+        data_files,
+        TEST_MINIMAL_SEQ_SCORESET,
+        standalone_worker_context,
+    )
+
+    async def dummy_mapping_job():
+        return await setup_mapping_output(async_client, session, score_set)
+
+    # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
+    # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
+    # object that sets up test mapping output.
+    with patch.object(
+        _UnixSelectorEventLoop,
+        "run_in_executor",
+        return_value=dummy_mapping_job(),
+    ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0):
+        await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1)
+        await arq_worker.async_run()
+        await arq_worker.run_check()
+
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+    assert (await arq_redis.llen(MAPPING_QUEUE_NAME)) == 0
+    assert (await arq_redis.get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == ""
+    assert len(mapped_variants_for_score_set) == score_set.num_variants
+    assert score_set.mapping_state == MappingState.complete
+    assert score_set.mapping_errors is None
+
+
+@pytest.mark.asyncio
+async def test_mapping_manager_enqueues_mapping_process_with_retried_mapping_successful_mapping_on_retry(
+    setup_worker_db, standalone_worker_context, session, async_client, data_files, arq_worker, arq_redis
+):
+    score_set = await setup_records_files_and_variants(
+        session,
+        async_client,
+        data_files,
+        TEST_MINIMAL_SEQ_SCORESET,
+        standalone_worker_context,
+    )
+
+    async def failed_mapping_job():
+        return Exception()
+
+    async def dummy_mapping_job():
+        return await setup_mapping_output(async_client, session, score_set)
+
+    # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
+    # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
+    # object that sets up test mapping output.
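The retry coverage below leans on unittest.mock's side_effect sequencing: when `side_effect` is a list, each call to the mock consumes the next element in order (raising it if the element is an exception instance, returning it otherwise). A tiny standalone example:

    from unittest.mock import MagicMock

    mock = MagicMock(side_effect=["first call", "second call"])
    print(mock())  # first call
    print(mock())  # second call

In the test that follows, the first executor call therefore yields a coroutine resolving to an Exception (a failed mapping attempt) and the retried call yields the successful mapping output; using a distinct coroutine object per entry also respects the await-once restriction noted earlier.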
+    with patch.object(
+        _UnixSelectorEventLoop,
+        "run_in_executor",
+        side_effect=[failed_mapping_job(), dummy_mapping_job()],
+    ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0):
+        await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1)
+        await arq_worker.async_run()
+        await arq_worker.run_check()
+
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+    assert (await arq_redis.llen(MAPPING_QUEUE_NAME)) == 0
+    assert (await arq_redis.get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == ""
+    assert len(mapped_variants_for_score_set) == score_set.num_variants
+    assert score_set.mapping_state == MappingState.complete
+    assert score_set.mapping_errors is None
+
+
+@pytest.mark.asyncio
+async def test_mapping_manager_enqueues_mapping_process_with_unsuccessful_mapping(
+    setup_worker_db, standalone_worker_context, session, async_client, data_files, arq_worker, arq_redis
+):
+    score_set = await setup_records_files_and_variants(
+        session,
+        async_client,
+        data_files,
+        TEST_MINIMAL_SEQ_SCORESET,
+        standalone_worker_context,
+    )
+
+    async def failed_mapping_job():
+        return Exception()
+
+    # We seem unable to mock requests via requests_mock that occur inside another event loop. We work around
+    # this limitation by patching the _UnixSelectorEventLoop's executor function with a coroutine
+    # object that sets up test mapping output.
+    with patch.object(
+        _UnixSelectorEventLoop,
+        "run_in_executor",
+        side_effect=[failed_mapping_job()] * 5,
+    ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0):
+        await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1)
+        await arq_worker.async_run()
+        await arq_worker.run_check()
+
+    score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
+    mapped_variants_for_score_set = session.scalars(
+        select(MappedVariant).join(Variant).join(ScoreSetDbModel).filter(ScoreSetDbModel.urn == score_set.urn)
+    ).all()
+    assert (await arq_redis.llen(MAPPING_QUEUE_NAME)) == 0
+    assert (await arq_redis.get(MAPPING_CURRENT_ID_NAME)).decode("utf-8") == ""
+    assert len(mapped_variants_for_score_set) == 0
+    assert score_set.mapping_state == MappingState.failed
+    assert score_set.mapping_errors is not None

From 4c114ae24018f6def1388c35a69ae932f12595a7 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 11 Oct 2024 15:22:58 -0700
Subject: [PATCH 54/58] Use Ruff for Linting and Formatting

---
 .flake8                                       |  11 --
 .github/workflows/run-tests-on-push.yml       |  14 ++
 .pre-commit-config.yaml                       |  13 +-
 .../194cfebabe32_rename_wild_type_sequence.py |   2 +-
 ...42909_make_index_on_contributors_unique.py |   2 +-
 ...6f7_merge_76e1e55bc5c1_and_d7e6f8c3b9dc.py |   7 +-
 ...44a45d616036_make_index_on_users_unique.py |   2 +-
 ...ifiers_table_to_publication_identifiers.py |   2 +-
 .../8bcb2b4edc60_add_foreign_key_indices.py   | 150 +++++++++---------
 ...04a_merge_6fae83d65ee4_and_886e059ad1a8.py |   3 -
 .../versions/fecb3e0d181d_make_urns_unique.py |  22 +--
 .../cdot/hgvs/dataproviders/__init__.pyi      |   2 +-
 mypy_stubs/mavehgvs/__init__.py               |   6 +-
 poetry.lock                                   |  43 ++++-
 pyproject.toml                                |  18 ++-
 src/mavedb/lib/exceptions.py                  |   2 +-
 src/mavedb/lib/identifiers.py                 |   1 -
 src/mavedb/lib/orcid.py                       |   4 +-
 src/mavedb/lib/script_environment.py          |   3 +-
 src/mavedb/lib/slack.py                       |   2 +-
 src/mavedb/lib/taxonomies.py                  |  30 ++--
src/mavedb/lib/validation/dataframe.py | 11 +- src/mavedb/models/score_set.py | 2 +- src/mavedb/models/target_accession.py | 3 +- src/mavedb/models/variant.py | 2 +- src/mavedb/routers/__init__.py | 2 +- src/mavedb/routers/mapped_variant.py | 4 +- src/mavedb/routers/statistics.py | 13 +- src/mavedb/server_main.py | 3 +- src/mavedb/view_models/contributor.py | 1 - src/mavedb/view_models/experiment.py | 1 + .../experiment_controlled_keyword.py | 10 +- src/mavedb/view_models/experiment_set.py | 2 +- src/mavedb/view_models/score_set.py | 6 +- src/mavedb/view_models/user.py | 1 - src/mavedb/worker/__init__.py | 2 +- src/mavedb/worker/jobs.py | 7 +- src/mavedb/worker/settings.py | 1 - tests/lib/test_authentication.py | 3 +- tests/routers/conftest.py | 9 +- tests/routers/test_access_keys.py | 2 - tests/routers/test_experiments.py | 14 +- tests/routers/test_hgvs.py | 1 - tests/routers/test_permissions.py | 108 ++++++++----- tests/routers/test_score_set.py | 23 +-- tests/routers/test_statistics.py | 8 +- tests/routers/test_users.py | 16 +- tests/validation/test_dataframe.py | 126 +++++++++++---- tests/validation/test_publication.py | 28 ++-- tests/view_models/test_target_gene.py | 85 +++++++--- tests/view_models/test_target_sequence.py | 16 +- tests/worker/test_jobs.py | 95 ++++++----- 52 files changed, 555 insertions(+), 389 deletions(-) delete mode 100644 .flake8 diff --git a/.flake8 b/.flake8 deleted file mode 100644 index f66aa7da..00000000 --- a/.flake8 +++ /dev/null @@ -1,11 +0,0 @@ -[flake8] -exclude = - .git - .pytest_cache - __pycache__ - alembic/versions - dist - venv -extend-ignore = E203 -max-complexity = 10 -max-line-length = 120 diff --git a/.github/workflows/run-tests-on-push.yml b/.github/workflows/run-tests-on-push.yml index 05f34f94..965ddfb3 100644 --- a/.github/workflows/run-tests-on-push.yml +++ b/.github/workflows/run-tests-on-push.yml @@ -61,3 +61,17 @@ jobs: - run: pip install poetry - run: poetry install --with dev --extras server - run: poetry run mypy src/ + + run-ruff-lint: + runs-on: ubuntu-latest + name: Ruff linting on Python 3.10 + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: "3.10" + cache: 'pip' + - run: pip install --upgrade pip + - run: pip install poetry + - run: poetry install --with dev --extras server + - run: poetry run ruff check diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 47bd73a5..529ceabb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,9 @@ repos: - - repo: https://github.com/psf/black - rev: 23.3.0 + - repo: https://github.com/astral-sh/ruff-pre-commit + # Ruff version. + rev: v0.6.8 hooks: - - id: black - # Latest version of Python supported by the project - # See https://pre-commit.com/#top_level-default_language_version - language_version: python3.9 + # Run the linter. + - id: ruff + # Run the formatter. + - id: ruff-format diff --git a/alembic/versions/194cfebabe32_rename_wild_type_sequence.py b/alembic/versions/194cfebabe32_rename_wild_type_sequence.py index c513045c..78607f58 100644 --- a/alembic/versions/194cfebabe32_rename_wild_type_sequence.py +++ b/alembic/versions/194cfebabe32_rename_wild_type_sequence.py @@ -5,8 +5,8 @@ Create Date: 2023-08-29 12:48:18.390567 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. 
diff --git a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py index 298182b1..38a40b03 100644 --- a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py +++ b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py @@ -5,8 +5,8 @@ Create Date: 2024-09-03 09:53:21.635751 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. diff --git a/alembic/versions/1d4933b4b6f7_merge_76e1e55bc5c1_and_d7e6f8c3b9dc.py b/alembic/versions/1d4933b4b6f7_merge_76e1e55bc5c1_and_d7e6f8c3b9dc.py index 91353f2a..5ff04337 100644 --- a/alembic/versions/1d4933b4b6f7_merge_76e1e55bc5c1_and_d7e6f8c3b9dc.py +++ b/alembic/versions/1d4933b4b6f7_merge_76e1e55bc5c1_and_d7e6f8c3b9dc.py @@ -5,13 +5,10 @@ Create Date: 2024-09-04 16:17:20.875937 """ -from alembic import op -import sqlalchemy as sa - # revision identifiers, used by Alembic. -revision = '1d4933b4b6f7' -down_revision = ('76e1e55bc5c1', 'd7e6f8c3b9dc') +revision = "1d4933b4b6f7" +down_revision = ("76e1e55bc5c1", "d7e6f8c3b9dc") branch_labels = None depends_on = None diff --git a/alembic/versions/44a45d616036_make_index_on_users_unique.py b/alembic/versions/44a45d616036_make_index_on_users_unique.py index e01bd8b6..22c6239f 100644 --- a/alembic/versions/44a45d616036_make_index_on_users_unique.py +++ b/alembic/versions/44a45d616036_make_index_on_users_unique.py @@ -5,8 +5,8 @@ Create Date: 2024-08-16 13:25:14.980820 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. revision = "44a45d616036" diff --git a/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py b/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py index 50201c5b..ffee217c 100644 --- a/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py +++ b/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py @@ -5,8 +5,8 @@ Create Date: 2023-05-09 16:18:41.360541 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. diff --git a/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py b/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py index b42bea1d..5b5904de 100644 --- a/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py +++ b/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py @@ -5,90 +5,90 @@ Create Date: 2024-05-14 22:36:47.095490 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. 
-revision = '8bcb2b4edc60' -down_revision = '8e26f1a1160d' +revision = "8bcb2b4edc60" +down_revision = "8e26f1a1160d" branch_labels = None depends_on = None def upgrade(): - op.create_index(op.f('ix_access_keys_user_id'), 'access_keys', ['user_id'], unique=False) - op.drop_index('ix_doi_identifiers_id', table_name='doi_identifiers') - op.drop_index('ix_ensembl_identifiers_id', table_name='ensembl_identifiers') - op.drop_index('ix_experiment_sets_id', table_name='experiment_sets') - op.create_index(op.f('ix_experiment_sets_created_by_id'), 'experiment_sets', ['created_by_id'], unique=False) - op.create_index(op.f('ix_experiment_sets_modified_by_id'), 'experiment_sets', ['modified_by_id'], unique=False) - op.drop_index('ix_experiments_id', table_name='experiments') - op.create_index(op.f('ix_experiments_created_by_id'), 'experiments', ['created_by_id'], unique=False) - op.create_index(op.f('ix_experiments_experiment_set_id'), 'experiments', ['experiment_set_id'], unique=False) - op.create_index(op.f('ix_experiments_modified_by_id'), 'experiments', ['modified_by_id'], unique=False) - op.drop_index('ix_genome_identifiers_id', table_name='genome_identifiers') - op.drop_index('ix_keywords_id', table_name='keywords') - op.drop_index('ix_licenses_id', table_name='licenses') - op.drop_index('ix_mapped_variants_id', table_name='mapped_variants') - op.create_index(op.f('ix_mapped_variants_variant_id'), 'mapped_variants', ['variant_id'], unique=False) - op.drop_index('ix_publication_identifiers_id', table_name='publication_identifiers') - op.drop_index('ix_refseq_identifiers_id', table_name='refseq_identifiers') - op.drop_index('ix_roles_name', table_name='roles') - op.drop_index('ix_scoresets_id', table_name='scoresets') - op.create_index(op.f('ix_scoresets_created_by_id'), 'scoresets', ['created_by_id'], unique=False) - op.create_index(op.f('ix_scoresets_experiment_id'), 'scoresets', ['experiment_id'], unique=False) - op.create_index(op.f('ix_scoresets_licence_id'), 'scoresets', ['licence_id'], unique=False) - op.create_index(op.f('ix_scoresets_modified_by_id'), 'scoresets', ['modified_by_id'], unique=False) - op.create_index(op.f('ix_scoresets_replaces_id'), 'scoresets', ['replaces_id'], unique=False) - op.drop_index('ix_sra_identifiers_id', table_name='sra_identifiers') - op.drop_index('ix_target_genes_id', table_name='target_genes') - op.create_index(op.f('ix_target_genes_accession_id'), 'target_genes', ['accession_id'], unique=False) - op.create_index(op.f('ix_target_genes_scoreset_id'), 'target_genes', ['scoreset_id'], unique=False) - op.create_index(op.f('ix_target_genes_target_sequence_id'), 'target_genes', ['target_sequence_id'], unique=False) - op.drop_index('ix_target_sequences_id', table_name='target_sequences') - op.create_index(op.f('ix_target_sequences_taxonomy_id'), 'target_sequences', ['taxonomy_id'], unique=False) - op.drop_index('ix_taxonomies_id', table_name='taxonomies') - op.create_index(op.f('ix_taxonomies_genome_identifier_id'), 'taxonomies', ['genome_identifier_id'], unique=False) - op.drop_index('ix_uniprot_identifiers_id', table_name='uniprot_identifiers') - op.drop_index('ix_variants_id', table_name='variants') - op.create_index(op.f('ix_variants_scoreset_id'), 'variants', ['scoreset_id'], unique=False) + op.create_index(op.f("ix_access_keys_user_id"), "access_keys", ["user_id"], unique=False) + op.drop_index("ix_doi_identifiers_id", table_name="doi_identifiers") + op.drop_index("ix_ensembl_identifiers_id", table_name="ensembl_identifiers") + 
op.drop_index("ix_experiment_sets_id", table_name="experiment_sets") + op.create_index(op.f("ix_experiment_sets_created_by_id"), "experiment_sets", ["created_by_id"], unique=False) + op.create_index(op.f("ix_experiment_sets_modified_by_id"), "experiment_sets", ["modified_by_id"], unique=False) + op.drop_index("ix_experiments_id", table_name="experiments") + op.create_index(op.f("ix_experiments_created_by_id"), "experiments", ["created_by_id"], unique=False) + op.create_index(op.f("ix_experiments_experiment_set_id"), "experiments", ["experiment_set_id"], unique=False) + op.create_index(op.f("ix_experiments_modified_by_id"), "experiments", ["modified_by_id"], unique=False) + op.drop_index("ix_genome_identifiers_id", table_name="genome_identifiers") + op.drop_index("ix_keywords_id", table_name="keywords") + op.drop_index("ix_licenses_id", table_name="licenses") + op.drop_index("ix_mapped_variants_id", table_name="mapped_variants") + op.create_index(op.f("ix_mapped_variants_variant_id"), "mapped_variants", ["variant_id"], unique=False) + op.drop_index("ix_publication_identifiers_id", table_name="publication_identifiers") + op.drop_index("ix_refseq_identifiers_id", table_name="refseq_identifiers") + op.drop_index("ix_roles_name", table_name="roles") + op.drop_index("ix_scoresets_id", table_name="scoresets") + op.create_index(op.f("ix_scoresets_created_by_id"), "scoresets", ["created_by_id"], unique=False) + op.create_index(op.f("ix_scoresets_experiment_id"), "scoresets", ["experiment_id"], unique=False) + op.create_index(op.f("ix_scoresets_licence_id"), "scoresets", ["licence_id"], unique=False) + op.create_index(op.f("ix_scoresets_modified_by_id"), "scoresets", ["modified_by_id"], unique=False) + op.create_index(op.f("ix_scoresets_replaces_id"), "scoresets", ["replaces_id"], unique=False) + op.drop_index("ix_sra_identifiers_id", table_name="sra_identifiers") + op.drop_index("ix_target_genes_id", table_name="target_genes") + op.create_index(op.f("ix_target_genes_accession_id"), "target_genes", ["accession_id"], unique=False) + op.create_index(op.f("ix_target_genes_scoreset_id"), "target_genes", ["scoreset_id"], unique=False) + op.create_index(op.f("ix_target_genes_target_sequence_id"), "target_genes", ["target_sequence_id"], unique=False) + op.drop_index("ix_target_sequences_id", table_name="target_sequences") + op.create_index(op.f("ix_target_sequences_taxonomy_id"), "target_sequences", ["taxonomy_id"], unique=False) + op.drop_index("ix_taxonomies_id", table_name="taxonomies") + op.create_index(op.f("ix_taxonomies_genome_identifier_id"), "taxonomies", ["genome_identifier_id"], unique=False) + op.drop_index("ix_uniprot_identifiers_id", table_name="uniprot_identifiers") + op.drop_index("ix_variants_id", table_name="variants") + op.create_index(op.f("ix_variants_scoreset_id"), "variants", ["scoreset_id"], unique=False) def downgrade(): - op.drop_index(op.f('ix_variants_scoreset_id'), table_name='variants') - op.create_index('ix_variants_id', 'variants', ['id'], unique=False) - op.create_index('ix_uniprot_identifiers_id', 'uniprot_identifiers', ['id'], unique=False) - op.drop_index(op.f('ix_taxonomies_genome_identifier_id'), table_name='taxonomies') - op.create_index('ix_taxonomies_id', 'taxonomies', ['id'], unique=False) - op.drop_index(op.f('ix_target_sequences_taxonomy_id'), table_name='target_sequences') - op.create_index('ix_target_sequences_id', 'target_sequences', ['id'], unique=False) - op.drop_index(op.f('ix_target_genes_target_sequence_id'), table_name='target_genes') - 
op.drop_index(op.f('ix_target_genes_scoreset_id'), table_name='target_genes') - op.drop_index(op.f('ix_target_genes_accession_id'), table_name='target_genes') - op.create_index('ix_target_genes_id', 'target_genes', ['id'], unique=False) - op.create_index('ix_sra_identifiers_id', 'sra_identifiers', ['id'], unique=False) - op.drop_index(op.f('ix_scoresets_replaces_id'), table_name='scoresets') - op.drop_index(op.f('ix_scoresets_modified_by_id'), table_name='scoresets') - op.drop_index(op.f('ix_scoresets_licence_id'), table_name='scoresets') - op.drop_index(op.f('ix_scoresets_experiment_id'), table_name='scoresets') - op.drop_index(op.f('ix_scoresets_created_by_id'), table_name='scoresets') - op.create_index('ix_scoresets_id', 'scoresets', ['id'], unique=False) - op.create_index('ix_roles_name', 'roles', ['name'], unique=False) - op.create_index('ix_refseq_identifiers_id', 'refseq_identifiers', ['id'], unique=False) - op.create_index('ix_publication_identifiers_id', 'publication_identifiers', ['id'], unique=False) - op.drop_index(op.f('ix_mapped_variants_variant_id'), table_name='mapped_variants') - op.create_index('ix_mapped_variants_id', 'mapped_variants', ['id'], unique=False) - op.create_index('ix_licenses_id', 'licenses', ['id'], unique=False) - op.create_index('ix_keywords_id', 'keywords', ['id'], unique=False) - op.create_index('ix_genome_identifiers_id', 'genome_identifiers', ['id'], unique=False) - op.drop_index(op.f('ix_experiments_modified_by_id'), table_name='experiments') - op.drop_index(op.f('ix_experiments_experiment_set_id'), table_name='experiments') - op.drop_index(op.f('ix_experiments_created_by_id'), table_name='experiments') - op.create_index('ix_experiments_id', 'experiments', ['id'], unique=False) - op.drop_index(op.f('ix_experiment_sets_modified_by_id'), table_name='experiment_sets') - op.drop_index(op.f('ix_experiment_sets_created_by_id'), table_name='experiment_sets') - op.create_index('ix_experiment_sets_id', 'experiment_sets', ['id'], unique=False) - op.create_index('ix_ensembl_identifiers_id', 'ensembl_identifiers', ['id'], unique=False) - op.create_index('ix_doi_identifiers_id', 'doi_identifiers', ['id'], unique=False) - op.drop_index(op.f('ix_access_keys_user_id'), table_name='access_keys') + op.drop_index(op.f("ix_variants_scoreset_id"), table_name="variants") + op.create_index("ix_variants_id", "variants", ["id"], unique=False) + op.create_index("ix_uniprot_identifiers_id", "uniprot_identifiers", ["id"], unique=False) + op.drop_index(op.f("ix_taxonomies_genome_identifier_id"), table_name="taxonomies") + op.create_index("ix_taxonomies_id", "taxonomies", ["id"], unique=False) + op.drop_index(op.f("ix_target_sequences_taxonomy_id"), table_name="target_sequences") + op.create_index("ix_target_sequences_id", "target_sequences", ["id"], unique=False) + op.drop_index(op.f("ix_target_genes_target_sequence_id"), table_name="target_genes") + op.drop_index(op.f("ix_target_genes_scoreset_id"), table_name="target_genes") + op.drop_index(op.f("ix_target_genes_accession_id"), table_name="target_genes") + op.create_index("ix_target_genes_id", "target_genes", ["id"], unique=False) + op.create_index("ix_sra_identifiers_id", "sra_identifiers", ["id"], unique=False) + op.drop_index(op.f("ix_scoresets_replaces_id"), table_name="scoresets") + op.drop_index(op.f("ix_scoresets_modified_by_id"), table_name="scoresets") + op.drop_index(op.f("ix_scoresets_licence_id"), table_name="scoresets") + op.drop_index(op.f("ix_scoresets_experiment_id"), table_name="scoresets") + 
op.drop_index(op.f("ix_scoresets_created_by_id"), table_name="scoresets") + op.create_index("ix_scoresets_id", "scoresets", ["id"], unique=False) + op.create_index("ix_roles_name", "roles", ["name"], unique=False) + op.create_index("ix_refseq_identifiers_id", "refseq_identifiers", ["id"], unique=False) + op.create_index("ix_publication_identifiers_id", "publication_identifiers", ["id"], unique=False) + op.drop_index(op.f("ix_mapped_variants_variant_id"), table_name="mapped_variants") + op.create_index("ix_mapped_variants_id", "mapped_variants", ["id"], unique=False) + op.create_index("ix_licenses_id", "licenses", ["id"], unique=False) + op.create_index("ix_keywords_id", "keywords", ["id"], unique=False) + op.create_index("ix_genome_identifiers_id", "genome_identifiers", ["id"], unique=False) + op.drop_index(op.f("ix_experiments_modified_by_id"), table_name="experiments") + op.drop_index(op.f("ix_experiments_experiment_set_id"), table_name="experiments") + op.drop_index(op.f("ix_experiments_created_by_id"), table_name="experiments") + op.create_index("ix_experiments_id", "experiments", ["id"], unique=False) + op.drop_index(op.f("ix_experiment_sets_modified_by_id"), table_name="experiment_sets") + op.drop_index(op.f("ix_experiment_sets_created_by_id"), table_name="experiment_sets") + op.create_index("ix_experiment_sets_id", "experiment_sets", ["id"], unique=False) + op.create_index("ix_ensembl_identifiers_id", "ensembl_identifiers", ["id"], unique=False) + op.create_index("ix_doi_identifiers_id", "doi_identifiers", ["id"], unique=False) + op.drop_index(op.f("ix_access_keys_user_id"), table_name="access_keys") diff --git a/alembic/versions/b3767156e04a_merge_6fae83d65ee4_and_886e059ad1a8.py b/alembic/versions/b3767156e04a_merge_6fae83d65ee4_and_886e059ad1a8.py index 7b1b5499..35c5b011 100644 --- a/alembic/versions/b3767156e04a_merge_6fae83d65ee4_and_886e059ad1a8.py +++ b/alembic/versions/b3767156e04a_merge_6fae83d65ee4_and_886e059ad1a8.py @@ -5,9 +5,6 @@ Create Date: 2024-04-15 11:24:54.269178 """ -from alembic import op -import sqlalchemy as sa - # revision identifiers, used by Alembic. revision = "b3767156e04a" diff --git a/alembic/versions/fecb3e0d181d_make_urns_unique.py b/alembic/versions/fecb3e0d181d_make_urns_unique.py index 35078bf4..83734b6c 100644 --- a/alembic/versions/fecb3e0d181d_make_urns_unique.py +++ b/alembic/versions/fecb3e0d181d_make_urns_unique.py @@ -5,30 +5,30 @@ Create Date: 2023-11-15 19:45:42.769529 """ + from alembic import op -import sqlalchemy as sa # revision identifiers, used by Alembic. -revision = 'fecb3e0d181d' -down_revision = 'c6154dd7d9b9' +revision = "fecb3e0d181d" +down_revision = "c6154dd7d9b9" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! 
diff --git a/mypy_stubs/cdot/hgvs/dataproviders/__init__.pyi b/mypy_stubs/cdot/hgvs/dataproviders/__init__.pyi
index 1d4ab178..6c53fe9f 100644
--- a/mypy_stubs/cdot/hgvs/dataproviders/__init__.pyi
+++ b/mypy_stubs/cdot/hgvs/dataproviders/__init__.pyi
@@ -1,3 +1,3 @@
+# ruff: noqa: F403
 from .fasta_seqfetcher import *
 from .json_data_provider import *
-from hgvs.dataproviders.seqfetcher import SeqFetcher
diff --git a/mypy_stubs/mavehgvs/__init__.py b/mypy_stubs/mavehgvs/__init__.py
index 3ff1a657..a25b6333 100644
--- a/mypy_stubs/mavehgvs/__init__.py
+++ b/mypy_stubs/mavehgvs/__init__.py
@@ -1,3 +1,3 @@
-from .variant import Variant
-from .position import VariantPosition
-from .exceptions import MaveHgvsParseError
+from .variant import Variant as Variant
+from .position import VariantPosition as VariantPosition
+from .exceptions import MaveHgvsParseError as MaveHgvsParseError
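(Editor's note, not part of the patch.) The stub changes above switch to the `import X as X` form, which type checkers and Ruff's unused-import rule (F401) treat as an explicit re-export, so the names remain importable from the package without being flagged. A minimal illustrative sketch of the two equivalent conventions, using the same `Variant` name from the diff:

    # mypy_stubs/mavehgvs/__init__.py
    # Redundant alias marks a deliberate re-export (what this patch uses):
    from .variant import Variant as Variant

    # Equivalent alternative: declare the public surface explicitly.
    from .variant import Variant
    __all__ = ["Variant"]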
diff --git a/poetry.lock b/poetry.lock
index f8832cbf..a946956f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -714,13 +714,13 @@ crt = ["awscrt (==0.21.2)"]
 
 [[package]]
 name = "botocore-stubs"
-version = "1.35.37"
+version = "1.35.39"
 description = "Type annotations and code completion for botocore"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "botocore_stubs-1.35.37-py3-none-any.whl", hash = "sha256:0fd4ce53636196fcb72b8dad1c67cb774f2f1941891a9c5293f91401ff6d8589"},
-    {file = "botocore_stubs-1.35.37.tar.gz", hash = "sha256:834f1d55c8e8a815bbb446fe9a7d3da09c4402312ff1a8fcf13fb6b4b894ab92"},
+    {file = "botocore_stubs-1.35.39-py3-none-any.whl", hash = "sha256:62b0518ea3056d76e00fa2b30a5de38b9c70eaf1058a2ed5f34bc208222f1b70"},
+    {file = "botocore_stubs-1.35.39.tar.gz", hash = "sha256:0d628444a15b94fb7284cd3cc34ba6f6bb7a076a319992d2f19111c644de4dba"},
 ]
 
 [package.dependencies]
@@ -2232,13 +2232,13 @@ files = [
 
 [[package]]
 name = "mirakuru"
-version = "2.5.2"
+version = "2.5.3"
 description = "Process executor (not only) for tests."
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 files = [
-    {file = "mirakuru-2.5.2-py3-none-any.whl", hash = "sha256:90c2d90a8cf14349b2f33e6db30a16acd855499811e0312e56cf80ceacf2d3e5"},
-    {file = "mirakuru-2.5.2.tar.gz", hash = "sha256:41ca583d355eb7a6cfdc21c1aea549979d685c27b57239b88725434f115a7132"},
+    {file = "mirakuru-2.5.3-py3-none-any.whl", hash = "sha256:2fab68356fb98fb5358ea3ab65f5e511f34b5a0b16cfd0a0935ef15a3393f025"},
+    {file = "mirakuru-2.5.3.tar.gz", hash = "sha256:39b33f8fcdf13764a6cfe936e0feeead3902a161fec438df3be7cce98f7933c6"},
 ]
 
 [package.dependencies]
@@ -3242,6 +3242,33 @@ files = [
 [package.dependencies]
 pyasn1 = ">=0.1.3"
 
+[[package]]
+name = "ruff"
+version = "0.6.9"
+description = "An extremely fast Python linter and code formatter, written in Rust."
+optional = false
+python-versions = ">=3.7"
+files = [
+    {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"},
+    {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"},
+    {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"},
+    {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"},
+    {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"},
+    {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"},
+    {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"},
+    {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"},
+    {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"},
+]
+
 [[package]]
 name = "s3transfer"
 version = "0.10.3"
@@ -4199,4 +4226,4 @@ server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptogra
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "10ebc4bcf807aea1ec077b25aac0ed9fce82853b30f1b8b9879f22d14ff8c7a8"
+content-hash = "ba27041cc125b2646364feaa3b04821f6f786da99e2cc7047f4ffa4a0b144ce3"
server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptogra [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "10ebc4bcf807aea1ec077b25aac0ed9fce82853b30f1b8b9879f22d14ff8c7a8" +content-hash = "ba27041cc125b2646364feaa3b04821f6f786da99e2cc7047f4ffa4a0b144ce3" diff --git a/pyproject.toml b/pyproject.toml index 624d9db5..99171429 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -64,9 +64,7 @@ watchtower = { version = "~3.2.0", optional = true } optional = true [tool.poetry.group.dev.dependencies] -black = "*" boto3-stubs = "~1.34.97" -flake8 = "*" mypy = "~1.10.0" pre-commit = "*" jsonschema = "*" @@ -82,6 +80,7 @@ types-python-jose = "~3.3.4" types-PyYAML = "~6.0.12.20240808" redis = "~5.0.2" requests-mock = "~1.11.0" +ruff = "^0.6.8" SQLAlchemy = { extras = ["mypy"], version = "~2.0.0" } @@ -89,11 +88,6 @@ SQLAlchemy = { extras = ["mypy"], version = "~2.0.0" } server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptography", "fastapi", "hgvs", "orcid", "psycopg2", "python-jose", "python-multipart", "requests", "starlette", "starlette-context", "slack-sdk", "uvicorn", "watchtower"] -[tool.black] -extend-exclude = "alembic/versions" -line-length = 120 - - [tool.mypy] plugins = [ "sqlalchemy.ext.mypy.plugin", @@ -109,3 +103,13 @@ norecursedirs = "tests/helpers/" # Uncomment the following lines to include application log output in Pytest logs. # log_cli = true # log_cli_level = "DEBUG" + +[tool.ruff] +# target python 3.9 and above +target-version = "py39" + +# max line length for linting is 120 characters +line-length = 120 + +# Exclude these files from linting/formatting +exclude = ["alembic/manual_migrations"] diff --git a/src/mavedb/lib/exceptions.py b/src/mavedb/lib/exceptions.py index 004cab6f..27759438 100644 --- a/src/mavedb/lib/exceptions.py +++ b/src/mavedb/lib/exceptions.py @@ -61,7 +61,7 @@ def __init__(self, message, code=None, params=None): # PY2 has a `message` property which is always there so we can't # duck-type on it. It was introduced in Python 2.5 and already # deprecated in Python 2.6. 
diff --git a/src/mavedb/lib/exceptions.py b/src/mavedb/lib/exceptions.py
index 004cab6f..27759438 100644
--- a/src/mavedb/lib/exceptions.py
+++ b/src/mavedb/lib/exceptions.py
@@ -61,7 +61,7 @@ def __init__(self, message, code=None, params=None):
         # PY2 has a `message` property which is always there so we can't
         # duck-type on it. It was introduced in Python 2.5 and already
         # deprecated in Python 2.6.
-        elif not hasattr(message, "message" if six.PY3 else "code"):
+        elif not hasattr(message, "message" if six.PY3 else "code"):  # noqa: F821
            message = message.error_list
         else:
             message, code, params = message.message, message.code, message.params
diff --git a/src/mavedb/lib/identifiers.py b/src/mavedb/lib/identifiers.py
index b4162924..d44b02c3 100644
--- a/src/mavedb/lib/identifiers.py
+++ b/src/mavedb/lib/identifiers.py
@@ -1,5 +1,4 @@
 import os
-from datetime import date
 from typing import Optional, Union, Mapping
 
 import eutils  # type: ignore
diff --git a/src/mavedb/lib/orcid.py b/src/mavedb/lib/orcid.py
index a5b4b629..7b713e17 100644
--- a/src/mavedb/lib/orcid.py
+++ b/src/mavedb/lib/orcid.py
@@ -30,7 +30,7 @@ def fetch_orcid_user(orcid_id: str) -> Optional[OrcidUser]:
             family_name=record["person"]["name"]["family-name"]["value"],
         )
         logger.debug(msg="Successfully fetched ORCID user.", extra=logging_context())
-    except:
+    except Exception:
        logger.debug(
            msg="Failed to fetch ORCID user; User exists but is name metadata not visible.", extra=logging_context()
        )
@@ -60,7 +60,7 @@ def fetch_orcid_user_email(orcid_id: str) -> Optional[str]:
         try:
             email = record["person"]["emails"]["email"][0]["email"]
             logger.debug(msg="Successfully fetched ORCID email.", extra=logging_context())
-        except:
+        except Exception:
            logger.debug(msg="Failed to fetch ORCID email; User exists but email not visible.", extra=logging_context())
 
     except Exception as e:
diff --git a/src/mavedb/lib/script_environment.py b/src/mavedb/lib/script_environment.py
index d94662bd..7761775c 100644
--- a/src/mavedb/lib/script_environment.py
+++ b/src/mavedb/lib/script_environment.py
@@ -1,10 +1,11 @@
 """
 Environment setup for scripts.
 """
+
 from sqlalchemy.orm import configure_mappers, Session
 
 from mavedb import deps
-from mavedb.models import *
+from mavedb.models import *  # noqa: F403
 
 
 def init_script_environment() -> Session:
diff --git a/src/mavedb/lib/slack.py b/src/mavedb/lib/slack.py
index f9ea19c1..a7979296 100644
--- a/src/mavedb/lib/slack.py
+++ b/src/mavedb/lib/slack.py
@@ -27,7 +27,7 @@ def send_slack_message(err, request=None):
     slack_webhook_url = os.getenv("SLACK_WEBHOOK_URL")
     if slack_webhook_url is not None and len(slack_webhook_url) > 0:
         client = WebhookClient(url=slack_webhook_url)
-        response = client.send(
+        client.send(
             text=text,
             blocks=[
                 {
diff --git a/src/mavedb/lib/taxonomies.py b/src/mavedb/lib/taxonomies.py
index e2bc1687..e33ec802 100644
--- a/src/mavedb/lib/taxonomies.py
+++ b/src/mavedb/lib/taxonomies.py
@@ -44,25 +44,27 @@ async def search_NCBI_taxonomy(db: Session, search: str) -> Any:
                 return None
 
             # Process the retrieved data as needed
-            ncbi_taxonomy = data['taxonomy_nodes'][0]['taxonomy']
-            ncbi_taxonomy.setdefault('organism_name', 'NULL')
-            ncbi_taxonomy.setdefault('common_name', 'NULL')
-            ncbi_taxonomy.setdefault('rank', 'NULL')
-            ncbi_taxonomy.setdefault('has_described_species_name', False)
-            taxonomy_record = Taxonomy(tax_id=ncbi_taxonomy['tax_id'],
-                                       organism_name=ncbi_taxonomy['organism_name'],
-                                       common_name=ncbi_taxonomy['common_name'],
-                                       rank=ncbi_taxonomy['rank'],
-                                       has_described_species_name=ncbi_taxonomy['has_described_species_name'],
-                                       url="https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=" + str(
-                                           ncbi_taxonomy['tax_id']),
-                                       article_reference="NCBI:txid" + str(ncbi_taxonomy['tax_id']))
+            ncbi_taxonomy = data["taxonomy_nodes"][0]["taxonomy"]
+            ncbi_taxonomy.setdefault("organism_name", "NULL")
+            ncbi_taxonomy.setdefault("common_name", "NULL")
ncbi_taxonomy.setdefault("rank", "NULL") + ncbi_taxonomy.setdefault("has_described_species_name", False) + taxonomy_record = Taxonomy( + tax_id=ncbi_taxonomy["tax_id"], + organism_name=ncbi_taxonomy["organism_name"], + common_name=ncbi_taxonomy["common_name"], + rank=ncbi_taxonomy["rank"], + has_described_species_name=ncbi_taxonomy["has_described_species_name"], + url="https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=" + + str(ncbi_taxonomy["tax_id"]), + article_reference="NCBI:txid" + str(ncbi_taxonomy["tax_id"]), + ) db.add(taxonomy_record) db.commit() db.refresh(taxonomy_record) else: raise HTTPException(status_code=404, detail=f"Taxonomy with search {search_text} not found in NCBI") else: - raise HTTPException(status_code=404, detail=f"Please enter valid searching words") + raise HTTPException(status_code=404, detail="Please enter valid searching words") return taxonomy_record diff --git a/src/mavedb/lib/validation/dataframe.py b/src/mavedb/lib/validation/dataframe.py index b02e8258..c4a90b1b 100644 --- a/src/mavedb/lib/validation/dataframe.py +++ b/src/mavedb/lib/validation/dataframe.py @@ -208,11 +208,16 @@ def validate_dataframe(df: pd.DataFrame, kind: str, targets: list["TargetGene"], # This is typesafe, despite Pylance's claims otherwise if score_set_is_accession_based and not score_set_is_sequence_based: validate_hgvs_genomic_column( - df[column_mapping[c]], is_index, [target.target_accession for target in targets], hdp # type: ignore + df[column_mapping[c]], + is_index, + [target.target_accession for target in targets], + hdp, # type: ignore ) elif score_set_is_sequence_based and not score_set_is_accession_based: validate_hgvs_transgenic_column( - df[column_mapping[c]], is_index, {target.target_sequence.label: target.target_sequence for target in targets} # type: ignore + df[column_mapping[c]], + is_index, + {target.target_sequence.label: target.target_sequence for target in targets}, # type: ignore ) else: raise MixedTargetError("Could not validate dataframe against provided mixed target types.") @@ -262,7 +267,7 @@ def validate_column_names(df: pd.DataFrame, kind: str) -> None: ValidationError If the column names are not valid """ - if any(type(c) != str for c in df.columns): + if any(type(c) is not str for c in df.columns): raise ValidationError("column names must be strings") if any(c.isspace() for c in df.columns) or any(len(c) == 0 for c in df.columns): diff --git a/src/mavedb/models/score_set.py b/src/mavedb/models/score_set.py index 01e3ec43..44cd0802 100644 --- a/src/mavedb/models/score_set.py +++ b/src/mavedb/models/score_set.py @@ -1,6 +1,6 @@ from datetime import date from sqlalchemy import Boolean, Column, Date, Enum, ForeignKey, Integer, String -from sqlalchemy.orm import relationship, backref, Mapped +from sqlalchemy.orm import relationship, Mapped from sqlalchemy.ext.associationproxy import association_proxy, AssociationProxy from sqlalchemy.schema import Table from sqlalchemy.dialects.postgresql import JSONB diff --git a/src/mavedb/models/target_accession.py b/src/mavedb/models/target_accession.py index 35992c6b..e054a50f 100644 --- a/src/mavedb/models/target_accession.py +++ b/src/mavedb/models/target_accession.py @@ -1,7 +1,6 @@ from datetime import date -from sqlalchemy import Column, Date, Integer, String, ForeignKey -from sqlalchemy.orm import backref, relationship +from sqlalchemy import Column, Date, Integer, String from mavedb.db.base import Base diff --git a/src/mavedb/models/variant.py b/src/mavedb/models/variant.py index 
index c3ddca21..b2d1e87d 100644
--- a/src/mavedb/models/variant.py
+++ b/src/mavedb/models/variant.py
@@ -1,7 +1,7 @@
 from datetime import date
 from sqlalchemy import Column, Date, ForeignKey, Integer, String
-from sqlalchemy.orm import relationship, backref, Mapped
+from sqlalchemy.orm import relationship, Mapped
 from sqlalchemy.dialects.postgresql import JSONB
 
 from mavedb.db.base import Base
 
diff --git a/src/mavedb/routers/__init__.py b/src/mavedb/routers/__init__.py
index cb7e81af..ae322c54 100644
--- a/src/mavedb/routers/__init__.py
+++ b/src/mavedb/routers/__init__.py
@@ -1,2 +1,2 @@
 # Import view models up front so any deferred forward references are resolved prior to OpenAPI spec generation.
-import mavedb.view_models
+from mavedb import view_models as view_models
diff --git a/src/mavedb/routers/mapped_variant.py b/src/mavedb/routers/mapped_variant.py
index d29883bb..8a87061b 100644
--- a/src/mavedb/routers/mapped_variant.py
+++ b/src/mavedb/routers/mapped_variant.py
@@ -1,6 +1,5 @@
-from typing import Any, List, Optional
+from typing import Any, Optional
 
-from arq import ArqRedis
 from fastapi import APIRouter, Depends
 from fastapi.exceptions import HTTPException
 from sqlalchemy.orm import Session
@@ -11,7 +10,6 @@
 from mavedb.models.mapped_variant import MappedVariant
 from mavedb.models.variant import Variant
 from mavedb.view_models import mapped_variant
-from mavedb.worker.jobs import MAPPING_QUEUE_NAME
 
 
 async def fetch_mapped_variant_by_variant_urn(db, urn: str) -> Optional[MappedVariant]:
diff --git a/src/mavedb/routers/statistics.py b/src/mavedb/routers/statistics.py
index 29445fe2..a6ef326e 100644
--- a/src/mavedb/routers/statistics.py
+++ b/src/mavedb/routers/statistics.py
@@ -2,7 +2,7 @@
 from fastapi import APIRouter, Depends, HTTPException
 from sqlalchemy import func, Table, select
 from sqlalchemy.orm import Session
-from typing import Any, Union
+from typing import Union
 
 from mavedb.deps import get_db
 from mavedb.models.controlled_keyword import ControlledKeyword
@@ -229,8 +229,10 @@ def _record_from_field_and_model(
     dict[
         RecordFields,
         Union[
-            Table, type[ExperimentControlledKeywordAssociation], type[ExperimentPublicationIdentifierAssociation],
-            type[ScoreSetPublicationIdentifierAssociation]
+            Table,
+            type[ExperimentControlledKeywordAssociation],
+            type[ExperimentPublicationIdentifierAssociation],
+            type[ScoreSetPublicationIdentifierAssociation],
         ],
     ],
 ] = {
@@ -334,8 +336,9 @@ def record_object_statistics(
     """
 
     # Validation to ensure 'keywords' is only used with 'experiment'.
     if model == RecordNames.scoreSet and field == RecordFields.keywords:
-        raise HTTPException(status_code=422,
-                            detail="The 'keywords' field can only be used with the 'experiment' model.")
+        raise HTTPException(
+            status_code=422, detail="The 'keywords' field can only be used with the 'experiment' model."
+        )
 
     count_data = _record_from_field_and_model(db, model, field)
diff --git a/src/mavedb/server_main.py b/src/mavedb/server_main.py
index b8429d1d..90a83ad4 100644
--- a/src/mavedb/server_main.py
+++ b/src/mavedb/server_main.py
@@ -14,7 +14,7 @@
 from starlette_context.plugins import CorrelationIdPlugin, RequestIdPlugin, UserAgentPlugin
 from eutils._internal.exceptions import EutilsRequestError  # type: ignore
 
-from mavedb.models import *
+from mavedb.models import *  # noqa: F403
 from mavedb import __version__
 
 from mavedb.lib.logging.context import (
@@ -33,7 +33,6 @@
     experiments,
     hgvs,
     licenses,
-    log,
     mapped_variant,
     orcid,
     permissions,
diff --git a/src/mavedb/view_models/contributor.py b/src/mavedb/view_models/contributor.py
index ca732cf9..bb7f42ed 100644
--- a/src/mavedb/view_models/contributor.py
+++ b/src/mavedb/view_models/contributor.py
@@ -1,6 +1,5 @@
 from typing import Optional
 
-from pydantic import Field
 
 from mavedb.view_models.base.base import BaseModel
 
diff --git a/src/mavedb/view_models/experiment.py b/src/mavedb/view_models/experiment.py
index 7e98325c..4caaf54c 100644
--- a/src/mavedb/view_models/experiment.py
+++ b/src/mavedb/view_models/experiment.py
@@ -147,6 +147,7 @@ class ExperimentPublicDump(SavedExperiment):
     score_sets: "Sequence[ScoreSetPublicDump]"
 
 
+# ruff: noqa: E402
 from mavedb.view_models.score_set import ScoreSetPublicDump
 
 ExperimentPublicDump.update_forward_refs()
diff --git a/src/mavedb/view_models/experiment_controlled_keyword.py b/src/mavedb/view_models/experiment_controlled_keyword.py
index f6b11469..d509ca90 100644
--- a/src/mavedb/view_models/experiment_controlled_keyword.py
+++ b/src/mavedb/view_models/experiment_controlled_keyword.py
@@ -1,4 +1,4 @@
-from mavedb.view_models.base.base import BaseModel, validator
+from mavedb.view_models.base.base import BaseModel
 from mavedb.view_models import keyword
 from mavedb.lib.validation import keywords
 
@@ -8,6 +8,7 @@
 
 class ExperimentControlledKeywordBase(BaseModel):
     """Base class for experiment and controlled keyword bridge table view models."""
+
     keyword: keyword.KeywordBase
     description: Optional[str]
 
@@ -18,18 +19,20 @@ def validate_fields(cls, values):
         # validated_keyword possible value: {'key': 'Delivery method', 'value': None}
 
         # Validate if keyword value is other, whether description is None.
-        if validated_keyword and validated_keyword['value']:
-            keywords.validate_description(validated_keyword['value'], validated_keyword['key'], validated_description)
+        if validated_keyword and validated_keyword["value"]:
+            keywords.validate_description(validated_keyword["value"], validated_keyword["key"], validated_description)
 
         return values
 
 
 class ExperimentControlledKeywordCreate(ExperimentControlledKeywordBase):
     """View model for creating a new keyword."""
+
     keyword: keyword.KeywordCreate
 
 
 class ExperimentControlledKeywordUpdate(ExperimentControlledKeywordBase):
     """View model for updating a keyword."""
+
     pass
 
 
@@ -42,4 +45,5 @@ class Config:
 
 class ExperimentControlledKeyword(SavedExperimentControlledKeyword):
     """Keyword view model for non-admin clients."""
+
     pass
diff --git a/src/mavedb/view_models/experiment_set.py b/src/mavedb/view_models/experiment_set.py
index efbe6062..d53167f9 100644
--- a/src/mavedb/view_models/experiment_set.py
+++ b/src/mavedb/view_models/experiment_set.py
@@ -1,5 +1,5 @@
 from datetime import date
-from typing import List, Sequence
+from typing import Sequence
 
 from pydantic.types import Optional
 
diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py
index cff67ba7..20e8e21c 100644
--- a/src/mavedb/view_models/score_set.py
+++ b/src/mavedb/view_models/score_set.py
@@ -2,8 +2,8 @@
 from __future__ import annotations
 
 from datetime import date
-from pydantic import root_validator, conlist
-from typing import Collection, Dict, Literal, Optional, Any, Sequence
+from pydantic import root_validator
+from typing import Collection, Dict, Optional, Any, Sequence
 
 from humps import camelize
 
@@ -13,7 +13,6 @@
 from mavedb.lib.validation.utilities import is_null, inf_or_float
 from mavedb.models.enums.processing_state import ProcessingState
 from mavedb.models.enums.mapping_state import MappingState
-from mavedb.models.target_sequence import TargetSequence
 from mavedb.view_models import PublicationIdentifiersGetter
 from mavedb.view_models.base.base import BaseModel, validator
 from mavedb.view_models.contributor import Contributor, ContributorCreate
@@ -437,6 +436,7 @@ class ScoreSetPublicDump(SavedScoreSet):
     mapping_errors: Optional[Dict]
 
 
+# ruff: noqa: E402
 from mavedb.view_models.experiment import Experiment
 
 ShortScoreSet.update_forward_refs()
diff --git a/src/mavedb/view_models/user.py b/src/mavedb/view_models/user.py
index bb44456f..9f6256c0 100644
--- a/src/mavedb/view_models/user.py
+++ b/src/mavedb/view_models/user.py
@@ -5,7 +5,6 @@
 
 from mavedb.models.enums.user_role import UserRole
 from mavedb.lib.validation.exceptions import ValidationError
-from mavedb.models.enums.user_role import UserRole
 from mavedb.view_models.base.base import BaseModel, validator
 
diff --git a/src/mavedb/worker/__init__.py b/src/mavedb/worker/__init__.py
index c94bf137..626f42c7 100644
--- a/src/mavedb/worker/__init__.py
+++ b/src/mavedb/worker/__init__.py
@@ -1,6 +1,6 @@
 from sqlalchemy.orm import configure_mappers
 
-from mavedb.models import *
+from mavedb.models import *  # noqa: F403
 from mavedb.worker.settings import ArqWorkerSettings
 
diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py
index 8b8ae5b6..e1936073 100644
--- a/src/mavedb/worker/jobs.py
+++ b/src/mavedb/worker/jobs.py
@@ -10,7 +10,6 @@
 from arq import ArqRedis
 from arq.jobs import Job, JobStatus
 from cdot.hgvs.dataproviders import RESTDataProvider
-from fqfa.util.translate import translate_dna
 from sqlalchemy import cast, delete, select, null
 from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import Session @@ -31,8 +30,6 @@ from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.mapped_variant import MappedVariant from mavedb.models.score_set import ScoreSet -from mavedb.models.target_gene import TargetGene -from mavedb.models.target_sequence import TargetSequence from mavedb.models.user import User from mavedb.models.variant import Variant from mavedb.data_providers.services import vrs_mapper @@ -369,9 +366,9 @@ async def map_variants_for_score_set( mapped_protein_ref = mapping_results.get("mapped_protein_reference_sequence") if computed_genomic_ref: - target_sequence = computed_genomic_ref["sequence"] + target_sequence = computed_genomic_ref["sequence"] # noqa: F841 elif computed_protein_ref: - target_sequence = computed_protein_ref["sequence"] + target_sequence = computed_protein_ref["sequence"] # noqa: F841 else: raise NonexistentMappingReferenceError() diff --git a/src/mavedb/worker/settings.py b/src/mavedb/worker/settings.py index 1b181123..56c82386 100644 --- a/src/mavedb/worker/settings.py +++ b/src/mavedb/worker/settings.py @@ -4,7 +4,6 @@ from arq.connections import RedisSettings from arq.cron import CronJob -from arq import ArqRedis, cron from mavedb.lib.logging.canonical import log_job from mavedb.worker.jobs import create_variants_for_score_set, map_variants_for_score_set, variant_mapper_manager diff --git a/tests/lib/test_authentication.py b/tests/lib/test_authentication.py index 3fd1e1e8..16e35f52 100644 --- a/tests/lib/test_authentication.py +++ b/tests/lib/test_authentication.py @@ -1,7 +1,6 @@ import pytest from fastapi import HTTPException -from sqlalchemy.exc import MultipleResultsFound from unittest.mock import patch from mavedb.lib.authentication import get_current_user_data_from_api_key, get_current_user @@ -34,7 +33,7 @@ async def test_get_current_user_data_from_key_invalid_token(session, setup_lib_d @pytest.mark.asyncio async def test_get_current_user_data_from_key_nonetype_token(session, setup_lib_db, client): - access_key = create_api_key_for_current_user(client) + create_api_key_for_current_user(client) user_data = await get_current_user_data_from_api_key(session, None) assert user_data is None diff --git a/tests/routers/conftest.py b/tests/routers/conftest.py index 4cbe4971..6f7f746d 100644 --- a/tests/routers/conftest.py +++ b/tests/routers/conftest.py @@ -4,7 +4,6 @@ import cdot.hgvs.dataproviders import pytest -import requests_mock from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.models.enums.user_role import UserRole @@ -13,7 +12,13 @@ from mavedb.models.role import Role from mavedb.models.user import User from tests.helpers.constants import ( - ADMIN_USER, EXTRA_USER, TEST_CDOT_TRANSCRIPT, TEST_DB_KEYWORDS, TEST_LICENSE, TEST_TAXONOMY, TEST_USER + ADMIN_USER, + EXTRA_USER, + TEST_CDOT_TRANSCRIPT, + TEST_DB_KEYWORDS, + TEST_LICENSE, + TEST_TAXONOMY, + TEST_USER, ) from tests.helpers.util import ( create_acc_score_set_with_variants, diff --git a/tests/routers/test_access_keys.py b/tests/routers/test_access_keys.py index 00bae9e0..79b053ed 100644 --- a/tests/routers/test_access_keys.py +++ b/tests/routers/test_access_keys.py @@ -1,5 +1,3 @@ -import pytest - from tests.helpers.constants import EXTRA_USER from tests.helpers.dependency_overrider import DependencyOverrider diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index 04186b59..b7a9817d 100644 --- a/tests/routers/test_experiments.py +++ b/tests/routers/test_experiments.py @@ -229,7 
+229,7 @@ def test_cannot_create_experiment_that_keywords_has_wrong_combination3(client, s ) -def test_cannot_create_experiment_that_keywords_has_wrong_combination3(client, setup_router_db): +def test_cannot_create_experiment_that_keywords_has_wrong_combination4(client, setup_router_db): """ Test src/mavedb/lib/validation/keywords.validate_keyword_keys function If choose Other in Variant Library Creation Method, should not have in vitro @@ -541,7 +541,7 @@ def test_anonymous_cannot_update_others_user_public_experiment_set( assert response.status_code == 401 response_data = response.json() - assert f"Could not validate credentials" in response_data["detail"] + assert "Could not validate credentials" in response_data["detail"] def test_admin_can_update_other_users_public_experiment_set( @@ -628,7 +628,7 @@ def test_anonymous_cannot_update_other_users_private_experiment( assert response.status_code == 401 response_data = response.json() - assert f"Could not validate credentials" in response_data["detail"] + assert "Could not validate credentials" in response_data["detail"] @pytest.mark.parametrize( @@ -1221,7 +1221,9 @@ def test_contributor_can_add_experiment_to_others_private_experiment_set(session assert response.status_code == 200 -def test_contributor_can_add_experiment_to_others_public_experiment_set(session, data_provider, client, setup_router_db, data_files): +def test_contributor_can_add_experiment_to_others_public_experiment_set( + session, data_provider, client, setup_router_db, data_files +): experiment = create_experiment(client) score_set = create_seq_score_set_with_variants( client, session, data_provider, experiment["urn"], data_files / "scores.csv" @@ -1257,7 +1259,9 @@ def test_cannot_add_experiment_to_others_private_experiment_set(session, client, assert f"experiment set with URN '{experiment_set_urn}' not found" in response_data["detail"] -def test_cannot_add_experiment_to_others_public_experiment_set(session, data_provider, client, setup_router_db, data_files): +def test_cannot_add_experiment_to_others_public_experiment_set( + session, data_provider, client, setup_router_db, data_files +): experiment = create_experiment(client) score_set = create_seq_score_set_with_variants( client, session, data_provider, experiment["urn"], data_files / "scores.csv" diff --git a/tests/routers/test_hgvs.py b/tests/routers/test_hgvs.py index 6480bbd9..f6b394a0 100644 --- a/tests/routers/test_hgvs.py +++ b/tests/routers/test_hgvs.py @@ -1,5 +1,4 @@ import requests_mock -import pytest import cdot.hgvs.dataproviders from hgvs.exceptions import HGVSDataNotAvailableError diff --git a/tests/routers/test_permissions.py b/tests/routers/test_permissions.py index e3bcf79f..c999ee22 100644 --- a/tests/routers/test_permissions.py +++ b/tests/routers/test_permissions.py @@ -15,13 +15,17 @@ # Experiment set tests def test_get_true_permission_from_own_experiment_set_add_experiment_check(client, setup_router_db): experiment = create_experiment(client) - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get( + f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment" + ) assert response.status_code == 200 - assert response.json() == True + assert response.json() -def test_contributor_gets_true_permission_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): +def 
test_contributor_gets_true_permission_from_others_experiment_set_add_experiment_check( + session, client, setup_router_db +): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) @@ -33,10 +37,12 @@ def test_contributor_gets_true_permission_from_others_experiment_set_add_experim TEST_USER["first_name"], TEST_USER["last_name"], ) - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get( + f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment" + ) assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_false_permission_from_others_experiment_set_add_experiment_check(session, client, setup_router_db): @@ -44,10 +50,12 @@ def test_get_false_permission_from_others_experiment_set_add_experiment_check(se change_ownership(session, experiment["urn"], ExperimentDbModel) change_ownership(session, experiment["experimentSetUrn"], ExperimentSetDbModel) - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment") + response = client.get( + f"/api/v1/permissions/user-is-permitted/experiment-set/{experiment['experimentSetUrn']}/add_experiment" + ) assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_cannot_get_permission_with_wrong_action_in_experiment_set(client, setup_router_db): @@ -56,13 +64,15 @@ def test_cannot_get_permission_with_wrong_action_in_experiment_set(client, setup assert response.status_code == 422 response_data = response.json() - assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ - "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ - " 'add_role', 'publish'" + assert ( + response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," + " 'add_role', 'publish'" + ) def test_cannot_get_permission_with_non_existing_experiment_set(client, setup_router_db): - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment-set/invalidUrn/update") + response = client.get("/api/v1/permissions/user-is-permitted/experiment-set/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -75,7 +85,7 @@ def test_get_true_permission_from_own_experiment_update_check(client, setup_rout response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_true_permission_from_own_experiment_delete_check(client, setup_router_db): @@ -83,7 +93,7 @@ def test_get_true_permission_from_own_experiment_delete_check(client, setup_rout response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_true_permission_from_own_experiment_add_score_set_check(client, setup_router_db): @@ -91,7 +101,7 @@ def test_get_true_permission_from_own_experiment_add_score_set_check(client, set response = 
client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_contributor_gets_true_permission_from_others_experiment_update_check(session, client, setup_router_db): @@ -108,7 +118,7 @@ def test_contributor_gets_true_permission_from_others_experiment_update_check(se response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_contributor_gets_true_permission_from_others_experiment_delete_check(session, client, setup_router_db): @@ -125,10 +135,12 @@ def test_contributor_gets_true_permission_from_others_experiment_delete_check(se response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 - assert response.json() == True + assert response.json() -def test_contributor_gets_true_permission_from_others_private_experiment_add_score_set_check(session, client, setup_router_db): +def test_contributor_gets_true_permission_from_others_private_experiment_add_score_set_check( + session, client, setup_router_db +): experiment = create_experiment(client) change_ownership(session, experiment["urn"], ExperimentDbModel) add_contributor( @@ -142,7 +154,7 @@ def test_contributor_gets_true_permission_from_others_private_experiment_add_sco response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_false_permission_from_others_private_experiment_add_score_set_check(session, client, setup_router_db): @@ -152,10 +164,12 @@ def test_get_false_permission_from_others_private_experiment_add_score_set_check response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/add_score_set") assert response.status_code == 200 - assert response.json() == False + assert not response.json() -def test_get_true_permission_from_others_public_experiment_add_score_set_check(session, data_provider, client, setup_router_db, data_files): +def test_get_true_permission_from_others_public_experiment_add_score_set_check( + session, data_provider, client, setup_router_db, data_files +): experiment = create_experiment(client) score_set_1 = create_seq_score_set_with_variants( client, session, data_provider, experiment["urn"], data_files / "scores.csv" @@ -166,7 +180,7 @@ def test_get_true_permission_from_others_public_experiment_add_score_set_check(s response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{pub_experiment_urn}/add_score_set") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_false_permission_from_others_experiment_update_check(session, client, setup_router_db): @@ -176,7 +190,7 @@ def test_get_false_permission_from_others_experiment_update_check(session, clien response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/update") assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_get_false_permission_from_other_users_experiment_delete_check(session, client, setup_router_db): @@ -186,7 +200,7 @@ def test_get_false_permission_from_other_users_experiment_delete_check(session, response = 
client.get(f"/api/v1/permissions/user-is-permitted/experiment/{experiment['urn']}/delete") assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_cannot_get_permission_with_wrong_action_in_experiment(client, setup_router_db): @@ -195,13 +209,15 @@ def test_cannot_get_permission_with_wrong_action_in_experiment(client, setup_rou assert response.status_code == 422 response_data = response.json() - assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ - "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ - " 'add_role', 'publish'" + assert ( + response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," + " 'add_role', 'publish'" + ) def test_cannot_get_permission_with_non_existing_experiment(client, setup_router_db): - response = client.get(f"/api/v1/permissions/user-is-permitted/experiment/invalidUrn/update") + response = client.get("/api/v1/permissions/user-is-permitted/experiment/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -215,7 +231,7 @@ def test_get_true_permission_from_own_score_set_update_check(client, setup_route response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_true_permission_from_own_score_set_delete_check(client, setup_router_db): @@ -224,7 +240,7 @@ def test_get_true_permission_from_own_score_set_delete_check(client, setup_route response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_get_true_permission_from_own_score_set_publish_check(client, setup_router_db): @@ -233,7 +249,7 @@ def test_get_true_permission_from_own_score_set_publish_check(client, setup_rout response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_contributor_gets_true_permission_from_others_score_set_update_check(session, client, setup_router_db): @@ -251,7 +267,7 @@ def test_contributor_gets_true_permission_from_others_score_set_update_check(ses response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_contributor_gets_true_permission_from_others_score_set_delete_check(session, client, setup_router_db): @@ -269,7 +285,7 @@ def test_contributor_gets_true_permission_from_others_score_set_delete_check(ses response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 - assert response.json() == True + assert response.json() def test_contributor_gets_true_permission_from_others_score_set_publish_check(session, client, setup_router_db): @@ -287,7 +303,7 @@ def test_contributor_gets_true_permission_from_others_score_set_publish_check(se response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 - assert response.json() == True + assert response.json() def 
test_get_false_permission_from_others_score_set_delete_check(session, client, setup_router_db): @@ -298,7 +314,7 @@ def test_get_false_permission_from_others_score_set_delete_check(session, client response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/delete") assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_get_false_permission_from_others_score_set_update_check(session, client, setup_router_db): @@ -309,7 +325,7 @@ def test_get_false_permission_from_others_score_set_update_check(session, client response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/update") assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_get_false_permission_from_others_score_set_publish_check(session, client, setup_router_db): @@ -320,7 +336,7 @@ def test_get_false_permission_from_others_score_set_publish_check(session, clien response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/{score_set['urn']}/publish") assert response.status_code == 200 - assert response.json() == False + assert not response.json() def test_cannot_get_permission_with_wrong_action_in_score_set(client, setup_router_db): @@ -330,13 +346,15 @@ def test_cannot_get_permission_with_wrong_action_in_score_set(client, setup_rout assert response.status_code == 422 response_data = response.json() - assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " \ - "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," \ - " 'add_role', 'publish'" + assert ( + response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: 'read', " + "'update', 'delete', 'add_experiment', 'add_score_set', 'set_scores'," + " 'add_role', 'publish'" + ) -def test_cannot_get_permission_with_non_existing_experiment(client, setup_router_db): - response = client.get(f"/api/v1/permissions/user-is-permitted/score-set/invalidUrn/update") +def test_cannot_get_permission_with_non_existing_score_set(client, setup_router_db): + response = client.get("/api/v1/permissions/user-is-permitted/score-set/invalidUrn/update") assert response.status_code == 404 response_data = response.json() @@ -345,9 +363,11 @@ def test_cannot_get_permission_with_non_existing_experiment(client, setup_router # Common invalid test def test_cannot_get_permission_with_non_existing_item(client, setup_router_db): - response = client.get(f"/api/v1/permissions/user-is-permitted/invalidModel/invalidUrn/update") + response = client.get("/api/v1/permissions/user-is-permitted/invalidModel/invalidUrn/update") assert response.status_code == 422 response_data = response.json() - assert response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: " \ - "'experiment', 'experiment-set', 'score-set'" + assert ( + response_data["detail"][0]["msg"] == "value is not a valid enumeration member; permitted: " + "'experiment', 'experiment-set', 'score-set'" + ) diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index 80308c27..09f37a29 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -4,7 +4,6 @@ from unittest.mock import patch import jsonschema -import pytest from arq import ArqRedis from mavedb.lib.validation.urn_re import MAVEDB_TMP_URN_RE from mavedb.models.enums.processing_state import ProcessingState @@ -1007,14 +1006,10 @@ def 
test_search_score_sets_match(session, data_provider, client, setup_router_db def test_search_score_sets_urn_match(session, data_provider, client, setup_router_db, data_files): experiment_1 = create_experiment(client) score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv" + client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" ) - search_payload = {"urn": score_set_1_1['urn']} + search_payload = {"urn": score_set_1_1["urn"]} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 assert len(response.json()) == 1 @@ -1025,13 +1020,9 @@ def test_search_score_sets_urn_match(session, data_provider, client, setup_route def test_search_score_sets_urn_with_space_match(session, data_provider, client, setup_router_db, data_files): experiment_1 = create_experiment(client) score_set_1_1 = create_seq_score_set_with_variants( - client, - session, - data_provider, - experiment_1["urn"], - data_files / "scores.csv" + client, session, data_provider, experiment_1["urn"], data_files / "scores.csv" ) - urn_with_space = score_set_1_1['urn'] + " " + urn_with_space = score_set_1_1["urn"] + " " search_payload = {"urn": urn_with_space} response = client.post("/api/v1/score-sets/search", json=search_payload) assert response.status_code == 200 @@ -1212,7 +1203,9 @@ def test_contributor_can_add_score_set_to_others_private_experiment(session, cli assert response.status_code == 200 -def test_contributor_can_add_score_set_to_others_public_experiment(session, data_provider, client, setup_router_db, data_files): +def test_contributor_can_add_score_set_to_others_public_experiment( + session, data_provider, client, setup_router_db, data_files +): experiment = create_experiment(client) score_set = create_seq_score_set_with_variants( client, session, data_provider, experiment["urn"], data_files / "scores.csv" @@ -1230,4 +1223,4 @@ def test_contributor_can_add_score_set_to_others_public_experiment(session, data score_set_post_payload = deepcopy(TEST_MINIMAL_SEQ_SCORESET) score_set_post_payload["experimentUrn"] = published_score_set["experiment"]["urn"] response = client.post("/api/v1/score-sets/", json=score_set_post_payload) - assert response.status_code == 200 \ No newline at end of file + assert response.status_code == 200 diff --git a/tests/routers/test_statistics.py b/tests/routers/test_statistics.py index 1f32847a..249c86a7 100644 --- a/tests/routers/test_statistics.py +++ b/tests/routers/test_statistics.py @@ -3,7 +3,6 @@ import cdot.hgvs.dataproviders import pytest from humps import camelize -from mavedb.models.controlled_keyword import ControlledKeyword from tests.helpers.constants import ( TEST_BIORXIV_IDENTIFIER, @@ -233,7 +232,7 @@ def test_target_gene_empty_field(client): [ ({"dbName": "PubMed", "identifier": f"{TEST_PUBMED_IDENTIFIER}"}), ({"dbName": "bioRxiv", "identifier": f"{TEST_BIORXIV_IDENTIFIER}"}), - (({"dbName": "medRxiv", "identifier": f"{TEST_MEDRXIV_IDENTIFIER}"})), + ({"dbName": "medRxiv", "identifier": f"{TEST_MEDRXIV_IDENTIFIER}"}), ], indirect=["mock_publication_fetch"], ) @@ -278,11 +277,12 @@ def test_record_keyword_statistics(session, data_provider, client, setup_router_ # updates. Folding these more complex setup steps into a fixture is more trouble than it's worth. 
experiment = create_experiment(client, record_update) score_set = create_seq_score_set_with_variants( - client, session, data_provider, experiment["urn"], data_files / "scores.csv") + client, session, data_provider, experiment["urn"], data_files / "scores.csv" + ) publish_score_set(client, score_set["urn"]) - response = client.get(f"/api/v1/statistics/record/experiment/keywords") + response = client.get("/api/v1/statistics/record/experiment/keywords") desired_field_values = ["SaCas9", "Endogenous locus library method", "Base editor", "Other"] for desired_field_value in desired_field_values: assert_statistic(desired_field_value, response) diff --git a/tests/routers/test_users.py b/tests/routers/test_users.py index f8386dcc..c49cf4f9 100644 --- a/tests/routers/test_users.py +++ b/tests/routers/test_users.py @@ -1,6 +1,5 @@ import pytest -from fastapi import Header from mavedb.models.enums.user_role import UserRole from mavedb.lib.authentication import get_current_user @@ -79,12 +78,15 @@ def test_cannot_impersonate_admin_user_as_default_user(client, setup_router_db, # NOTE: We can't mock JWTBearer directly because the object is created when the `get_current_user` function is called. # Instead, mock the function that decodes the JWT and present a fake `Bearer test` string that # lets us reach the `decode_jwt` function call without raising exceptions. - with DependencyOverrider( - { - get_current_user: get_current_user, - require_current_user: require_current_user, - } - ), mock.patch("mavedb.lib.authentication.decode_jwt", lambda _: {"sub": TEST_USER["username"]}): + with ( + DependencyOverrider( + { + get_current_user: get_current_user, + require_current_user: require_current_user, + } + ), + mock.patch("mavedb.lib.authentication.decode_jwt", lambda _: {"sub": TEST_USER["username"]}), + ): response = client.get( "/api/v1/users/me", headers={"Authorization": "Bearer test", "X-Active-Roles": f"{UserRole.admin.name},ordinary user"}, diff --git a/tests/validation/test_dataframe.py b/tests/validation/test_dataframe.py index 6216cbbc..c9676456 100644 --- a/tests/validation/test_dataframe.py +++ b/tests/validation/test_dataframe.py @@ -1,12 +1,10 @@ import itertools from unittest import TestCase -import os.path import numpy as np import pandas as pd import pytest import cdot.hgvs.dataproviders -from pathlib import Path from unittest.mock import patch from tests.helpers.constants import VALID_ACCESSION, TEST_CDOT_TRANSCRIPT @@ -761,24 +759,32 @@ def test_valid_columns_single_target(self): for column in self.valid_hgvs_columns: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) for column in self.valid_hgvs_columns_invalid_for_index: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) def test_valid_columns_multi_target(self): for column in self.valid_hgvs_columns_multi_target: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) for 
column in self.valid_hgvs_columns_invalid_for_index_multi_target: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) # Test when supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) @@ -787,7 +793,9 @@ def test_valid_columns_invalid_supplied_targets(self): with self.subTest(column=column): with self.assertRaises(ValueError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_pt": self.pt_sequence_test_case}, # type: ignore ) # Test when multiple supplied targets do not contain a DNA sequence (only valid for hgvs_nt col) @@ -796,7 +804,9 @@ def test_valid_columns_invalid_supplied_targets_multi_target(self): with self.subTest(column=column): with self.assertRaises(ValueError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_pt": self.pt_sequence_test_case, "test_pt_2": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_pt": self.pt_sequence_test_case, "test_pt_2": self.pt_sequence_test_case}, # type: ignore ) def test_valid_columns_invalid_column_name(self): @@ -804,7 +814,9 @@ def test_valid_columns_invalid_column_name(self): with self.subTest(column=column): with self.assertRaises(ValueError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) def test_valid_columns_invalid_column_name_multi_target(self): @@ -812,33 +824,43 @@ def test_valid_columns_invalid_column_name_multi_target(self): with self.subTest(column=column): with self.assertRaises(ValueError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) def test_index_columns(self): for column in self.valid_hgvs_columns: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) for column in self.valid_hgvs_columns_invalid_for_index: with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) def test_index_columns_multi_target(self): for column in self.valid_hgvs_columns_multi_target: with self.subTest(column=column): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) for column in self.valid_hgvs_columns_invalid_for_index_multi_target: with self.subTest(column=column): with self.assertRaises(ValidationError): 
validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) def test_invalid_column_values(self): @@ -846,13 +868,17 @@ def test_invalid_column_values(self): with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) for column in self.invalid_hgvs_columns_by_contents: with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) def test_invalid_column_values_multi_target(self): @@ -860,13 +886,17 @@ def test_invalid_column_values_multi_target(self): with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) for column in self.invalid_hgvs_columns_by_contents_multi_target: with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) def test_valid_column_values_wrong_column_name(self): @@ -874,13 +904,17 @@ def test_valid_column_values_wrong_column_name(self): with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) for column in self.invalid_hgvs_columns_by_name: with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case} # type: ignore + column, + is_index=True, + targets={"test_nt": self.nt_sequence_test_case}, # type: ignore ) def test_valid_column_values_wrong_column_name_multi_target(self): @@ -888,13 +922,17 @@ def test_valid_column_values_wrong_column_name_multi_target(self): with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=False, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + is_index=False, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) for column in self.invalid_hgvs_columns_by_name: with self.subTest(column=column): with self.assertRaises(ValidationError): validate_hgvs_transgenic_column( - column, is_index=True, targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case} # type: ignore + column, + 
is_index=True, + targets={"test_nt": self.nt_sequence_test_case, "test_pt": self.pt_sequence_test_case}, # type: ignore ) @@ -953,7 +991,9 @@ def test_valid_variant(self): "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ): - validate_hgvs_genomic_column(self.valid_hgvs_column, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.valid_hgvs_column, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_valid_variant_valid_missing(self): with patch.object( @@ -961,7 +1001,9 @@ def test_valid_variant_valid_missing(self): "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ): - validate_hgvs_genomic_column(self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_valid_variant_valid_duplicate(self): with patch.object( @@ -969,7 +1011,9 @@ def test_valid_variant_valid_duplicate(self): "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ): - validate_hgvs_genomic_column(self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.missing_data, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_valid_variant_index(self): with patch.object( @@ -977,7 +1021,9 @@ def test_valid_variant_index(self): "_get_transcript", return_value=TEST_CDOT_TRANSCRIPT, ): - validate_hgvs_genomic_column(self.valid_hgvs_column, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.valid_hgvs_column, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_valid_variant_invalid_missing_index(self): with ( @@ -988,7 +1034,9 @@ def test_valid_variant_invalid_missing_index(self): return_value=TEST_CDOT_TRANSCRIPT, ), ): - validate_hgvs_genomic_column(self.missing_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.missing_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_valid_variant_invalid_duplicate_index(self): with ( @@ -999,7 +1047,9 @@ def test_valid_variant_invalid_duplicate_index(self): return_value=TEST_CDOT_TRANSCRIPT, ), ): - validate_hgvs_genomic_column(self.duplicate_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider) # type: ignore + validate_hgvs_genomic_column( + self.duplicate_data, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider + ) # type: ignore def test_invalid_column_values(self): for column in self.invalid_hgvs_columns_by_contents: @@ -1013,7 +1063,10 @@ def test_invalid_column_values(self): ), ): validate_hgvs_genomic_column( - column, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider # type: ignore + column, + is_index=False, + targets=[self.accession_test_case], + hdp=self.human_data_provider, # type: ignore ) for column in self.invalid_hgvs_columns_by_contents: with ( @@ -1026,7 +1079,10 @@ def test_invalid_column_values(self): ), ): validate_hgvs_genomic_column( - column, is_index=True, 
targets=[self.accession_test_case], hdp=self.human_data_provider # type: ignore + column, + is_index=True, + targets=[self.accession_test_case], + hdp=self.human_data_provider, # type: ignore ) def test_valid_column_values_wrong_column_name(self): @@ -1041,7 +1097,10 @@ def test_valid_column_values_wrong_column_name(self): ), ): validate_hgvs_genomic_column( - column, is_index=False, targets=[self.accession_test_case], hdp=self.human_data_provider # type: ignore + column, + is_index=False, + targets=[self.accession_test_case], + hdp=self.human_data_provider, # type: ignore ) for column in self.invalid_hgvs_columns_by_name: with ( @@ -1054,7 +1113,10 @@ def test_valid_column_values_wrong_column_name(self): ), ): validate_hgvs_genomic_column( - column, is_index=True, targets=[self.accession_test_case], hdp=self.human_data_provider # type: ignore + column, + is_index=True, + targets=[self.accession_test_case], + hdp=self.human_data_provider, # type: ignore ) # TODO: Test multiple targets diff --git a/tests/validation/test_publication.py b/tests/validation/test_publication.py index 0edaf12b..bca0abfa 100644 --- a/tests/validation/test_publication.py +++ b/tests/validation/test_publication.py @@ -15,16 +15,16 @@ class TestValidateGenericPublication(TestCase): def test_valid_pubmed(self): - assert validate_publication("20711111") == None + assert validate_publication("20711111") is None def test_valid_biorxiv(self): - assert validate_publication("207222") == None + assert validate_publication("207222") is None def test_valid_medrxiv(self): - assert validate_publication("20733333") == None + assert validate_publication("20733333") is None def test_valid_crossref(self): - assert validate_publication("10.1101/1234") == None + assert validate_publication("10.1101/1234") is None def test_invalid_identifier(self): with self.assertRaises(ValidationError): @@ -37,7 +37,7 @@ def test_valid_pubmed(self): assert validate_pubmed("20711111") def test_invalid_pubmed(self): - assert validate_pubmed("invalid_id") == False + assert validate_pubmed("invalid_id") is False class TestValidateBioRxivPublication(TestCase): @@ -48,12 +48,12 @@ def test_valid_biorxiv_old(self): assert validate_biorxiv("207222") def test_invalid_biorxiv_new(self): - assert validate_biorxiv("2018.12.12.207222") == False + assert validate_biorxiv("2018.12.12.207222") is False def test_invalid_biorxiv_old(self): - assert validate_biorxiv("20722") == False - assert validate_biorxiv("2072222") == False - assert validate_biorxiv("invalid") == False + assert validate_biorxiv("20722") is False + assert validate_biorxiv("2072222") is False + assert validate_biorxiv("invalid") is False class TestValidateMedRxivPublication(TestCase): @@ -64,12 +64,12 @@ def test_valid_medrxiv_old(self): assert validate_medrxiv("20733333") def test_invalid_medrxiv_new(self): - assert validate_medrxiv("2018.12.12.20733333") == False + assert validate_medrxiv("2018.12.12.20733333") is False def test_invalid_medrxiv_old(self): - assert validate_medrxiv("2073333") == False - assert validate_medrxiv("207333333") == False - assert validate_medrxiv("invalid") == False + assert validate_medrxiv("2073333") is False + assert validate_medrxiv("207333333") is False + assert validate_medrxiv("invalid") is False class TestIdentifierValidFor(TestCase): @@ -133,7 +133,7 @@ def test_valid_crossref(self): class TestValidateDbName(TestCase): def test_valid_names(self): for name in valid_dbnames: - assert validate_db_name(name) == None + assert validate_db_name(name) is None def 
test_empty_name(self): with self.assertRaises(ValidationError): diff --git a/tests/view_models/test_target_gene.py b/tests/view_models/test_target_gene.py index eb8e2a9c..29f5a8dd 100644 --- a/tests/view_models/test_target_gene.py +++ b/tests/view_models/test_target_gene.py @@ -1,7 +1,6 @@ from mavedb.view_models.target_gene import TargetGeneCreate import pytest -import datetime def test_create_target_gene_with_sequence(): @@ -19,9 +18,17 @@ def test_create_target_gene_with_sequence(): "CTTACTCTAGCTTCCCGGCAACAATTAATAGACTGGATGGAGGCGGATAAAGTTGCAGGACCACTTCTGCGCTCGGCCCTTCCGGCTGGCTGGTTTAT" "TGCTGATAAATCTGGAGCCGGTGAGCGTGGGTCTCGCGGTATCATTGCAGCACTGGGGCCAGATGGTAAGCCCTCCCGTATCGTAGTTATCTACACGA" "CGGGGAGTCAGGCAACTATGGATGAACGAAATAGACAGATCGCTGAGATAGGTGCCTCACTGATTAAGCATTGGTAA", - "taxonomy": {"taxId": 9606, "organismName": "Homo sapiens", "commonName": "human", "rank": "SPECIES", - "hasDescribedSpeciesName": True, "articleReference": "NCBI:txid9606", "genomeId": None, - "id": 14, "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606"}, + "taxonomy": { + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, + "id": 14, + "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606", + }, } externalIdentifier = TargetGeneCreate( name=name, @@ -52,9 +59,17 @@ def test_create_invalid_category(): name = "UBE2I" invalid_category = "invalid name" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, "offset": 0}] - taxonomy = {"taxId": 9606, "organismName": "Homo sapiens", "commonName": "human", "rank": "SPECIES", - "hasDescribedSpeciesName": True, "articleReference": "NCBI:txid9606", "genomeId": None, - "id": 14, "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606"} + taxonomy = { + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, + "id": 14, + "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606", + } target_sequence = { "sequenceType": "dna", "sequence": "ATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCCTTTTTTGCGGCATTTTGCCTTCCTGTTTTTGCTCACCCAGAAACGCTGGTGAAAGTAAAAGA" @@ -85,9 +100,17 @@ def test_create_invalid_sequence_type(): name = "UBE2I" category = "Regulatory" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, "offset": 0}] - taxonomy = {"taxId": 9606, "organismName": "Homo sapiens", "commonName": "human", "rank": "SPECIES", - "hasDescribedSpeciesName": True, "articleReference": "NCBI:txid9606", "genomeId": None, - "id": 14, "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606"} + taxonomy = { + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, + "id": 14, + "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606", + } target_sequence = { "sequenceType": "dnaa", "sequence": "ATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCCTTTTTTGCGGCATTTTGCCTTCCTGTTTTTGCTCACCCAGAAACGCTGGTGAAAGTAAAAGA" @@ -116,9 +139,17 @@ def test_create_not_match_sequence_and_type(): category = "Regulatory" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, 
"offset": 0}] target_sequence = {"sequenceType": "dna", "sequence": "ARCG"} - taxonomy = {"taxId": 9606, "organismName": "Homo sapiens", "commonName": "human", "rank": "SPECIES", - "hasDescribedSpeciesName": True, "articleReference": "NCBI:txid9606", "genomeId": None, - "id": 14, "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606"} + taxonomy = { + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, + "id": 14, + "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606", + } with pytest.raises(ValueError) as exc_info: TargetGeneCreate( name=name, @@ -135,9 +166,17 @@ def test_create_invalid_sequence(): category = "Regulatory" external_identifiers = [{"identifier": {"dbName": "Ensembl", "identifier": "ENSG00000103275"}, "offset": 0}] target_sequence = {"sequenceType": "dna", "sequence": "AOCG%"} - taxonomy = {"taxId": 9606, "organismName": "Homo sapiens", "commonName": "human", "rank": "SPECIES", - "hasDescribedSpeciesName": True, "articleReference": "NCBI:txid9606", "genomeId": None, - "id": 14, "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606"} + taxonomy = { + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, + "id": 14, + "url": "https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?mode=info&id=9606", + } with pytest.raises(ValueError) as exc_info: TargetGeneCreate( name=name, @@ -180,13 +219,13 @@ def test_cant_create_target_gene_with_both_sequence_and_accession(): "TGCTGATAAATCTGGAGCCGGTGAGCGTGGGTCTCGCGGTATCATTGCAGCACTGGGGCCAGATGGTAAGCCCTCCCGTATCGTAGTTATCTACACGA" "CGGGGAGTCAGGCAACTATGGATGAACGAAATAGACAGATCGCTGAGATAGGTGCCTCACTGATTAAGCATTGGTAA", "taxonomy": { - "taxId": 9606, - "organismName": "Homo sapiens", - "commonName": "human", - "rank": "SPECIES", - "hasDescribedSpeciesName": True, - "articleReference": "NCBI:txid9606", - "genomeId": None, + "taxId": 9606, + "organismName": "Homo sapiens", + "commonName": "human", + "rank": "SPECIES", + "hasDescribedSpeciesName": True, + "articleReference": "NCBI:txid9606", + "genomeId": None, }, } with pytest.raises(ValueError) as exc_info: diff --git a/tests/view_models/test_target_sequence.py b/tests/view_models/test_target_sequence.py index b5f15e04..d2bb5696 100644 --- a/tests/view_models/test_target_sequence.py +++ b/tests/view_models/test_target_sequence.py @@ -68,9 +68,7 @@ def test_cannot_create_target_sequence_with_label_containing_colon(): taxonomy = TEST_TAXONOMY with pytest.raises(ValueError) as exc_info: - target_sequence = TargetSequenceCreate( - sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label - ) + TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label) assert f"Target sequence label `{label}` may not contain a colon." 
in str(exc_info.value) @@ -82,9 +80,7 @@ def test_cannot_create_target_sequence_with_invalid_sequence_type(): taxonomy = TEST_TAXONOMY with pytest.raises(ValueError) as exc_info: - target_sequence = TargetSequenceCreate( - sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label - ) + TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label) assert f"'{sequence_type}' is not a valid sequence type" in str(exc_info.value) @@ -96,9 +92,7 @@ def test_cannot_create_target_sequence_with_invalid_inferred_type(): taxonomy = TEST_TAXONOMY with pytest.raises(ValueError) as exc_info: - target_sequence = TargetSequenceCreate( - sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label - ) + TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label) assert "sequence is invalid" in str(exc_info.value) @@ -117,8 +111,6 @@ def test_cannot_create_target_sequence_with_invalid_sequence(sequence_type, exc_ taxonomy = TEST_TAXONOMY with pytest.raises(ValueError) as exc_info: - target_sequence = TargetSequenceCreate( - sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label - ) + TargetSequenceCreate(sequence_type=sequence_type, sequence=sequence, taxonomy=taxonomy, label=label) assert exc_string in str(exc_info.value) diff --git a/tests/worker/test_jobs.py b/tests/worker/test_jobs.py index 08a3c4a9..372dd701 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -2,10 +2,8 @@ from asyncio.unix_events import _UnixSelectorEventLoop from copy import deepcopy -from requests import HTTPError from uuid import uuid4 from unittest.mock import patch -import requests_mock import arq.jobs import cdot.hgvs.dataproviders @@ -207,7 +205,9 @@ async def test_create_variants_for_score_set_with_caught_exception( # This is somewhat dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some exception will be raised no matter what in the async job. - with (patch.object(pd.DataFrame, "isnull", side_effect=Exception) as mocked_exc,): + with ( + patch.object(pd.DataFrame, "isnull", side_effect=Exception) as mocked_exc, + ): result = await create_variants_for_score_set( standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts ) @@ -239,7 +239,9 @@ async def test_create_variants_for_score_set_with_caught_base_exception( # This is somewhat (extra) dumb and wouldn't actually happen like this, but it serves as an effective way to guarantee # some base exception will be handled no matter what in the async job. 
-    with (patch.object(pd.DataFrame, "isnull", side_effect=BaseException),):
+    with (
+        patch.object(pd.DataFrame, "isnull", side_effect=BaseException),
+    ):
         result = await create_variants_for_score_set(
             standalone_worker_context, uuid4().hex, score_set.id, 1, scores, counts
         )
@@ -430,16 +432,18 @@ async def test_create_variants_for_score_set_enqueues_manager_and_successful_map
     async def dummy_mapping_job():
         return await setup_mapping_output(async_client, session, score_set)
 
-    with patch.object(
-        cdot.hgvs.dataproviders.RESTDataProvider,
-        "_get_transcript",
-        return_value=TEST_CDOT_TRANSCRIPT,
-    ) as hdp, patch.object(
-        _UnixSelectorEventLoop,
-        "run_in_executor",
-        return_value=dummy_mapping_job(),
-    ), patch(
-        "mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0
+    with (
+        patch.object(
+            cdot.hgvs.dataproviders.RESTDataProvider,
+            "_get_transcript",
+            return_value=TEST_CDOT_TRANSCRIPT,
+        ) as hdp,
+        patch.object(
+            _UnixSelectorEventLoop,
+            "run_in_executor",
+            return_value=dummy_mapping_job(),
+        ),
+        patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0),
     ):
         await arq_redis.enqueue_job("create_variants_for_score_set", uuid4().hex, score_set.id, 1, scores, counts)
         await arq_worker.async_run()
@@ -706,6 +710,7 @@ async def test_create_mapped_variants_for_scoreset_exception_in_mapping_setup_vr
     assert score_set.mapping_state == MappingState.failed
     assert score_set.mapping_errors is not None
 
+
 @pytest.mark.asyncio
 async def test_create_mapped_variants_for_scoreset_mapping_exception(
     setup_worker_db, async_client, standalone_worker_context, session, data_files
@@ -901,11 +906,14 @@ async def dummy_mapping_job():
     # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround
     # this limitation by instead patching the _UnixSelectorEventLoop's executor function, with a coroutine
     # object that sets up test mapping output.
-    with patch.object(
-        _UnixSelectorEventLoop,
-        "run_in_executor",
-        return_value=dummy_mapping_job(),
-    ), patch.object(ArqRedis, "lpush", awaitable_exception()):
+    with (
+        patch.object(
+            _UnixSelectorEventLoop,
+            "run_in_executor",
+            return_value=dummy_mapping_job(),
+        ),
+        patch.object(ArqRedis, "lpush", awaitable_exception()),
+    ):
         result = await map_variants_for_score_set(standalone_worker_context, uuid4().hex, score_set.id, 1)
 
     score_set = session.scalars(select(ScoreSetDbModel).where(ScoreSetDbModel.urn == score_set.urn)).one()
@@ -1108,8 +1116,9 @@ async def test_mapping_manager_occupied_queue_mapping_in_progress_error_during_e
     )
     await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "5")
 
-    with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress), patch.object(
-        ArqRedis, "enqueue_job", return_value=awaitable_exception()
+    with (
+        patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.in_progress),
+        patch.object(ArqRedis, "enqueue_job", return_value=awaitable_exception()),
     ):
         result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1)
 
@@ -1135,8 +1144,9 @@ async def test_mapping_manager_occupied_queue_mapping_not_in_progress_error_duri
     )
     await standalone_worker_context["redis"].set(MAPPING_CURRENT_ID_NAME, "")
 
-    with patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found), patch.object(
-        ArqRedis, "enqueue_job", return_value=awaitable_exception()
+    with (
+        patch.object(arq.jobs.Job, "status", return_value=arq.jobs.JobStatus.not_found),
+        patch.object(ArqRedis, "enqueue_job", return_value=awaitable_exception()),
     ):
         result = await variant_mapper_manager(standalone_worker_context, uuid4().hex, 1)
 
@@ -1344,11 +1354,14 @@ async def dummy_mapping_job():
     # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround
     # this limitation by instead patching the _UnixSelectorEventLoop's executor function, with a coroutine
     # object that sets up test mapping output.
-    with patch.object(
-        _UnixSelectorEventLoop,
-        "run_in_executor",
-        return_value=dummy_mapping_job(),
-    ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0):
+    with (
+        patch.object(
+            _UnixSelectorEventLoop,
+            "run_in_executor",
+            return_value=dummy_mapping_job(),
+        ),
+        patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0),
+    ):
         await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1)
         await arq_worker.async_run()
         await arq_worker.run_check()
@@ -1385,11 +1398,14 @@ async def dummy_mapping_job():
     # We seem unable to mock requests via requests_mock that occur inside another event loop. Workaround
     # this limitation by instead patching the _UnixSelectorEventLoop's executor function, with a coroutine
     # object that sets up test mapping output.
-    with patch.object(
-        _UnixSelectorEventLoop,
-        "run_in_executor",
-        side_effect=[failed_mapping_job(), dummy_mapping_job()],
-    ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0):
+    with (
+        patch.object(
+            _UnixSelectorEventLoop,
+            "run_in_executor",
+            side_effect=[failed_mapping_job(), dummy_mapping_job()],
+        ),
+        patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0),
+    ):
         await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1)
         await arq_worker.async_run()
         await arq_worker.run_check()
@@ -1423,11 +1439,14 @@ async def failed_mapping_job():
    # We seem unable to mock requests via requests_mock that occur inside another event loop.
Workaround # this limitation by instead patching the _UnixSelectorEventLoop 's executor function, with a coroutine # object that sets up test mappingn output. - with patch.object( - _UnixSelectorEventLoop, - "run_in_executor", - side_effect=[failed_mapping_job()] * 5, - ), patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0): + with ( + patch.object( + _UnixSelectorEventLoop, + "run_in_executor", + side_effect=[failed_mapping_job()] * 5, + ), + patch("mavedb.worker.jobs.BACKOFF_IN_SECONDS", 0), + ): await arq_redis.enqueue_job("variant_mapper_manager", uuid4().hex, 1) await arq_worker.async_run() await arq_worker.run_check() From d620aeb6dfe551b9a179a764781e1fb840f041a0 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Fri, 11 Oct 2024 15:35:13 -0700 Subject: [PATCH 55/58] Sort Imports --- alembic/alembic_helpers.py | 3 +- alembic/env.py | 5 +- .../194cfebabe32_rename_wild_type_sequence.py | 1 - ...42909_make_index_on_contributors_unique.py | 1 - ...2e_add_publication_identifier_metadata_.py | 9 +- .../2b6f40ea2fb6_add_score_range_column.py | 4 +- ...consolidate_publication_identifier_dois.py | 3 +- .../33e99d4b90cc_add_mapped_variants_table.py | 32 +- ...de_roles_as_enums_access_keys_can_have_.py | 3 +- ...8f64b_simplify_reference_genome_target_.py | 3 +- ...ifiers_table_to_publication_identifiers.py | 1 - .../60103ad1cb5b_add_target_sequence_label.py | 3 +- alembic/versions/6fae83d65ee4_taxonomy.py | 3 +- .../versions/76e1e55bc5c1_add_contributors.py | 89 +- ...nify_publication_identifier_date_fields.py | 3 +- ...86e059ad1a8_score_set_processing_errors.py | 3 +- .../8bcb2b4edc60_add_foreign_key_indices.py | 1 - ...add_is_first_login_column_to_user_model.py | 3 +- .../90e7860964a2_add_target_accession.py | 3 +- .../9702d32bacb3_controlled_keyword.py | 3 +- alembic/versions/988ca84c701b_.py | 54 +- alembic/versions/9d566d915a2c_.py | 1066 ++++++++++------- ...dd7d9b9_add_gene_name_column_to_target_.py | 3 +- .../d7e6f8c3b9dc_scoreset_mapping_columns.py | 6 +- .../da9ba478647d_add_primary_publication.py | 3 +- .../versions/ec5d2787bec9_external_links.py | 13 +- alembic/versions/f11fd758436e_.py | 23 +- ...f36cf612e029_additional_mapping_columns.py | 9 +- .../versions/fecb3e0d181d_make_urns_unique.py | 1 - mypy_stubs/bioutils/assemblies.pyi | 5 +- .../hgvs/dataproviders/fasta_seqfetcher.pyi | 2 +- .../hgvs/dataproviders/json_data_provider.pyi | 31 +- mypy_stubs/hgvs/dataproviders/interface.pyi | 2 - mypy_stubs/hgvs/dataproviders/seqfetcher.pyi | 1 - mypy_stubs/hgvs/dataproviders/uta.pyi | 11 +- mypy_stubs/hgvs/exceptions.pyi | 1 - mypy_stubs/hgvs/parser.pyi | 3 +- mypy_stubs/hgvs/validator.pyi | 2 +- mypy_stubs/idutils/__init__.pyi | 6 +- mypy_stubs/mavehgvs/__init__.py | 4 +- mypy_stubs/mavehgvs/position.pyi | 17 +- mypy_stubs/mavehgvs/variant.pyi | 14 +- src/mavedb/data_providers/services.py | 2 +- src/mavedb/db/base.py | 2 +- src/mavedb/deps.py | 4 +- src/mavedb/lib/authentication.py | 10 +- src/mavedb/lib/authorization.py | 3 +- src/mavedb/lib/contributors.py | 3 +- src/mavedb/lib/experiments.py | 12 +- src/mavedb/lib/identifiers.py | 8 +- src/mavedb/lib/keywords.py | 10 +- src/mavedb/lib/logging/canonical.py | 5 +- src/mavedb/lib/logging/context.py | 11 +- src/mavedb/lib/logging/logged_route.py | 8 +- src/mavedb/lib/mave/hgvs.py | 6 +- src/mavedb/lib/mave/variant.py | 4 +- src/mavedb/lib/orcid.py | 2 +- src/mavedb/lib/permissions.py | 20 +- src/mavedb/lib/score_sets.py | 23 +- src/mavedb/lib/script_environment.py | 2 +- src/mavedb/lib/slack.py | 2 +- src/mavedb/lib/taxonomies.py | 3 +- 
src/mavedb/lib/urns.py | 7 +- src/mavedb/lib/validation/dataframe.py | 3 +- src/mavedb/lib/validation/identifier.py | 2 +- src/mavedb/lib/validation/keywords.py | 34 +- src/mavedb/lib/validation/publication.py | 6 +- src/mavedb/lib/validation/target.py | 5 +- src/mavedb/lib/validation/utilities.py | 16 +- src/mavedb/lib/validation/variant.py | 7 +- src/mavedb/logging/config.py | 1 + src/mavedb/models/access_key.py | 7 +- src/mavedb/models/controlled_keyword.py | 4 +- src/mavedb/models/ensembl_offset.py | 4 +- src/mavedb/models/experiment.py | 56 +- .../models/experiment_controlled_keyword.py | 15 +- .../experiment_publication_identifier.py | 10 +- src/mavedb/models/experiment_set.py | 9 +- src/mavedb/models/mapped_variant.py | 3 +- src/mavedb/models/refseq_offset.py | 4 +- src/mavedb/models/role.py | 2 +- src/mavedb/models/score_set.py | 23 +- .../score_set_publication_identifier.py | 12 +- src/mavedb/models/target_gene.py | 10 +- src/mavedb/models/target_sequence.py | 9 +- src/mavedb/models/taxonomy.py | 5 +- src/mavedb/models/uniprot_offset.py | 6 +- src/mavedb/models/user.py | 8 +- src/mavedb/models/variant.py | 3 +- src/mavedb/routers/access_keys.py | 4 +- src/mavedb/routers/api_information.py | 5 +- src/mavedb/routers/controlled_keywords.py | 10 +- src/mavedb/routers/experiment_sets.py | 6 +- src/mavedb/routers/experiments.py | 11 +- src/mavedb/routers/hgvs.py | 4 +- src/mavedb/routers/mapped_variant.py | 3 +- src/mavedb/routers/orcid.py | 6 +- src/mavedb/routers/permissions.py | 12 +- src/mavedb/routers/score_sets.py | 24 +- src/mavedb/routers/statistics.py | 19 +- src/mavedb/routers/target_gene_identifiers.py | 4 +- src/mavedb/routers/taxonomies.py | 36 +- src/mavedb/routers/users.py | 4 +- src/mavedb/scripts/export_public_data.py | 4 +- src/mavedb/server_main.py | 19 +- src/mavedb/view_models/__init__.py | 3 +- src/mavedb/view_models/access_key.py | 2 +- src/mavedb/view_models/base/base.py | 3 +- src/mavedb/view_models/contributor.py | 1 - src/mavedb/view_models/doi_identifier.py | 2 +- .../experiment_controlled_keyword.py | 9 +- .../view_models/external_gene_identifier.py | 2 +- .../external_gene_identifier_offset.py | 2 +- src/mavedb/view_models/keyword.py | 8 +- src/mavedb/view_models/mapped_variant.py | 2 +- .../view_models/publication_identifier.py | 8 +- src/mavedb/view_models/score_set.py | 10 +- src/mavedb/view_models/target_gene.py | 11 +- src/mavedb/view_models/target_sequence.py | 8 +- src/mavedb/view_models/taxonomy.py | 2 +- src/mavedb/view_models/user.py | 4 +- src/mavedb/worker/__init__.py | 1 - src/mavedb/worker/jobs.py | 13 +- src/mavedb/worker/settings.py | 4 +- tests/conftest.py | 6 +- tests/dump_rels.py | 6 +- tests/helpers/constants.py | 2 + tests/helpers/util.py | 8 +- tests/lib/conftest.py | 5 +- tests/lib/test_authentication.py | 11 +- tests/lib/test_score_set.py | 9 +- tests/routers/conftest.py | 2 +- tests/routers/test_access_keys.py | 8 +- tests/routers/test_experiments.py | 16 +- tests/routers/test_hgvs.py | 7 +- tests/routers/test_permissions.py | 6 +- tests/routers/test_score_set.py | 4 +- tests/routers/test_users.py | 9 +- tests/validation/test_dataframe.py | 7 +- tests/validation/test_identifier.py | 12 +- tests/validation/test_keywords.py | 69 +- tests/validation/test_publication.py | 14 +- tests/validation/test_target.py | 4 +- tests/validation/test_urn_re.py | 6 +- tests/validation/test_utilities.py | 3 +- tests/validation/test_variant.py | 4 +- .../test_external_gene_identifiers.py | 4 +- tests/view_models/test_keyword.py | 12 +- 
.../test_publication_identifier.py | 11 +- tests/view_models/test_score_set.py | 42 +- tests/view_models/test_target_gene.py | 4 +- tests/view_models/test_target_sequence.py | 5 +- tests/view_models/test_user.py | 4 +- tests/view_models/test_wild_type_sequence.py | 27 +- tests/worker/conftest.py | 4 +- tests/worker/test_jobs.py | 21 +- 156 files changed, 1353 insertions(+), 1129 deletions(-) diff --git a/alembic/alembic_helpers.py b/alembic/alembic_helpers.py index 7055fc19..902f52cb 100644 --- a/alembic/alembic_helpers.py +++ b/alembic/alembic_helpers.py @@ -1,10 +1,11 @@ # From https://improveandrepeat.com/2021/09/python-friday-87-handling-pre-existing-tables-with-alembic-and-sqlalchemy/ # Based on https://github.com/talkpython/data-driven-web-apps-with-flask -from alembic import op from sqlalchemy import engine_from_config from sqlalchemy.engine import reflection +from alembic import op + def table_does_not_exist(table, schema=None): config = op.get_context().config diff --git a/alembic/env.py b/alembic/env.py index cb30dd06..9cdc453d 100644 --- a/alembic/env.py +++ b/alembic/env.py @@ -1,8 +1,7 @@ -from logging.config import fileConfig import os +from logging.config import fileConfig -from sqlalchemy import engine_from_config -from sqlalchemy import pool +from sqlalchemy import engine_from_config, pool from alembic import context diff --git a/alembic/versions/194cfebabe32_rename_wild_type_sequence.py b/alembic/versions/194cfebabe32_rename_wild_type_sequence.py index 78607f58..c8f25068 100644 --- a/alembic/versions/194cfebabe32_rename_wild_type_sequence.py +++ b/alembic/versions/194cfebabe32_rename_wild_type_sequence.py @@ -8,7 +8,6 @@ from alembic import op - # revision identifiers, used by Alembic. revision = "194cfebabe32" down_revision = "44d5c568f64b" diff --git a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py index 38a40b03..97fdfa37 100644 --- a/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py +++ b/alembic/versions/1cee01c42909_make_index_on_contributors_unique.py @@ -8,7 +8,6 @@ from alembic import op - # revision identifiers, used by Alembic. revision = "1cee01c42909" down_revision = "1d4933b4b6f7" diff --git a/alembic/versions/22e2d92d602e_add_publication_identifier_metadata_.py b/alembic/versions/22e2d92d602e_add_publication_identifier_metadata_.py index 7a62ce01..13c079c0 100644 --- a/alembic/versions/22e2d92d602e_add_publication_identifier_metadata_.py +++ b/alembic/versions/22e2d92d602e_add_publication_identifier_metadata_.py @@ -5,20 +5,21 @@ Create Date: 2023-06-01 14:51:04.700969 """ -from typing import Optional + import os +from typing import Optional import eutils -from eutils._internal.xmlfacades.pubmedarticleset import PubmedArticleSet import sqlalchemy as sa from eutils import EutilsNCBIError -from mavedb.lib.exceptions import AmbiguousIdentifierError +from eutils._internal.xmlfacades.pubmedarticleset import PubmedArticleSet from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Session from alembic import op -from mavedb.lib.identifiers import ExternalPublication +from mavedb.lib.exceptions import AmbiguousIdentifierError from mavedb.lib.external_publications import Rxiv +from mavedb.lib.identifiers import ExternalPublication from mavedb.models.publication_identifier import PublicationIdentifier # revision identifiers, used by Alembic. 
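The reordering visible in these migration hunks is standard isort grouping: standard-library imports first, third-party packages (sqlalchemy, eutils) next, and the repository's own top-level packages, including the alembic scripts' `from alembic import op`, in a trailing first-party section separated by a blank line. A minimal sketch of reproducing that ordering with isort's Python API follows; the `known_first_party` values and the `black` profile here are illustrative assumptions, since the repository's actual isort configuration is not part of this patch.

import isort

# Assumption for illustration: treat the repo's top-level ``alembic`` and
# ``mavedb`` packages as first-party, so ``from alembic import op`` sorts
# into its own section after third-party imports such as sqlalchemy.
config = isort.Config(known_first_party=["alembic", "mavedb"], profile="black")

messy = (
    "from alembic import op\n"
    "import sqlalchemy as sa\n"
    "from sqlalchemy.dialects import postgresql\n"
)

print(isort.code(messy, config=config))
# import sqlalchemy as sa
# from sqlalchemy.dialects import postgresql
#
# from alembic import op

Running isort with such a configuration over the whole tree is what produces the mechanical, behavior-preserving import moves seen throughout this patch.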
diff --git a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py index b2de846e..099a7634 100644 --- a/alembic/versions/2b6f40ea2fb6_add_score_range_column.py +++ b/alembic/versions/2b6f40ea2fb6_add_score_range_column.py @@ -5,10 +5,12 @@ Create Date: 2024-09-09 12:25:33.180077 """ -from alembic import op + import sqlalchemy as sa from sqlalchemy.dialects import postgresql +from alembic import op + # revision identifiers, used by Alembic. revision = "2b6f40ea2fb6" down_revision = "1cee01c42909" diff --git a/alembic/versions/2c8d7e2bf2fe_consolidate_publication_identifier_dois.py b/alembic/versions/2c8d7e2bf2fe_consolidate_publication_identifier_dois.py index 43167150..09d7449b 100644 --- a/alembic/versions/2c8d7e2bf2fe_consolidate_publication_identifier_dois.py +++ b/alembic/versions/2c8d7e2bf2fe_consolidate_publication_identifier_dois.py @@ -5,9 +5,10 @@ Create Date: 2024-05-16 13:06:05.561411 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "2c8d7e2bf2fe" diff --git a/alembic/versions/33e99d4b90cc_add_mapped_variants_table.py b/alembic/versions/33e99d4b90cc_add_mapped_variants_table.py index bb6492a5..6eff735b 100644 --- a/alembic/versions/33e99d4b90cc_add_mapped_variants_table.py +++ b/alembic/versions/33e99d4b90cc_add_mapped_variants_table.py @@ -5,33 +5,39 @@ Create Date: 2023-05-15 17:24:07.847206 """ -from alembic import op + import sqlalchemy as sa from sqlalchemy.dialects import postgresql +from alembic import op + # revision identifiers, used by Alembic. -revision = '33e99d4b90cc' -down_revision = 'da9ba478647d' +revision = "33e99d4b90cc" +down_revision = "da9ba478647d" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.create_table('mapped_variants', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('pre_mapped', postgresql.JSONB(astext_type=sa.Text()), nullable=False), - sa.Column('post_mapped', postgresql.JSONB(astext_type=sa.Text()), nullable=False), - sa.Column('variant_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['variant_id'], ['variants.id'], ), - sa.PrimaryKeyConstraint('id') + op.create_table( + "mapped_variants", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("pre_mapped", postgresql.JSONB(astext_type=sa.Text()), nullable=False), + sa.Column("post_mapped", postgresql.JSONB(astext_type=sa.Text()), nullable=False), + sa.Column("variant_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["variant_id"], + ["variants.id"], + ), + sa.PrimaryKeyConstraint("id"), ) - op.create_index(op.f('ix_mapped_variants_id'), 'mapped_variants', ['id'], unique=False) + op.create_index(op.f("ix_mapped_variants_id"), "mapped_variants", ["id"], unique=False) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### - op.drop_index(op.f('ix_mapped_variants_id'), table_name='mapped_variants') - op.drop_table('mapped_variants') + op.drop_index(op.f("ix_mapped_variants_id"), table_name="mapped_variants") + op.drop_table("mapped_variants") # ### end Alembic commands ### diff --git a/alembic/versions/377bb8c30bde_roles_as_enums_access_keys_can_have_.py b/alembic/versions/377bb8c30bde_roles_as_enums_access_keys_can_have_.py index 96ccc1ec..2cee5bac 100644 --- a/alembic/versions/377bb8c30bde_roles_as_enums_access_keys_can_have_.py +++ b/alembic/versions/377bb8c30bde_roles_as_enums_access_keys_can_have_.py @@ -5,9 +5,10 @@ Create Date: 2024-04-18 09:19:04.008237 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "377bb8c30bde" diff --git a/alembic/versions/44d5c568f64b_simplify_reference_genome_target_.py b/alembic/versions/44d5c568f64b_simplify_reference_genome_target_.py index 96e43a0c..7b6d080c 100644 --- a/alembic/versions/44d5c568f64b_simplify_reference_genome_target_.py +++ b/alembic/versions/44d5c568f64b_simplify_reference_genome_target_.py @@ -5,9 +5,10 @@ Create Date: 2023-08-24 15:20:01.208691 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "44d5c568f64b" diff --git a/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py b/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py index ffee217c..e3d9e987 100644 --- a/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py +++ b/alembic/versions/5cad62af3705_rename_pubmed_identifiers_table_to_publication_identifiers.py @@ -8,7 +8,6 @@ from alembic import op - # revision identifiers, used by Alembic. revision = "5cad62af3705" down_revision = "f11fd758436e" diff --git a/alembic/versions/60103ad1cb5b_add_target_sequence_label.py b/alembic/versions/60103ad1cb5b_add_target_sequence_label.py index c6408303..b0c09e13 100644 --- a/alembic/versions/60103ad1cb5b_add_target_sequence_label.py +++ b/alembic/versions/60103ad1cb5b_add_target_sequence_label.py @@ -5,9 +5,10 @@ Create Date: 2023-08-29 16:04:44.620385 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "60103ad1cb5b" diff --git a/alembic/versions/6fae83d65ee4_taxonomy.py b/alembic/versions/6fae83d65ee4_taxonomy.py index 02711685..9c03f314 100644 --- a/alembic/versions/6fae83d65ee4_taxonomy.py +++ b/alembic/versions/6fae83d65ee4_taxonomy.py @@ -5,9 +5,10 @@ Create Date: 2023-12-21 18:06:18.912925 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "6fae83d65ee4" diff --git a/alembic/versions/76e1e55bc5c1_add_contributors.py b/alembic/versions/76e1e55bc5c1_add_contributors.py index 885614cd..d4feed83 100644 --- a/alembic/versions/76e1e55bc5c1_add_contributors.py +++ b/alembic/versions/76e1e55bc5c1_add_contributors.py @@ -5,52 +5,75 @@ Create Date: 2024-08-22 06:17:03.265438 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. 
-revision = '76e1e55bc5c1' -down_revision = '9702d32bacb3' +revision = "76e1e55bc5c1" +down_revision = "9702d32bacb3" branch_labels = None depends_on = None def upgrade(): - op.create_table('contributors', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('orcid_id', sa.String(), nullable=False), - sa.Column('given_name', sa.String(), nullable=True), - sa.Column('family_name', sa.String(), nullable=True), - sa.PrimaryKeyConstraint('id') + op.create_table( + "contributors", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("orcid_id", sa.String(), nullable=False), + sa.Column("given_name", sa.String(), nullable=True), + sa.Column("family_name", sa.String(), nullable=True), + sa.PrimaryKeyConstraint("id"), ) - op.create_index(op.f('ix_contributors_orcid_id'), 'contributors', ['orcid_id'], unique=False) - op.create_table('experiment_set_contributors', - sa.Column('experiment_set_id', sa.Integer(), nullable=False), - sa.Column('contributor_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['contributor_id'], ['contributors.id'], ), - sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ), - sa.PrimaryKeyConstraint('experiment_set_id', 'contributor_id') + op.create_index(op.f("ix_contributors_orcid_id"), "contributors", ["orcid_id"], unique=False) + op.create_table( + "experiment_set_contributors", + sa.Column("experiment_set_id", sa.Integer(), nullable=False), + sa.Column("contributor_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["contributor_id"], + ["contributors.id"], + ), + sa.ForeignKeyConstraint( + ["experiment_set_id"], + ["experiment_sets.id"], + ), + sa.PrimaryKeyConstraint("experiment_set_id", "contributor_id"), ) - op.create_table('experiment_contributors', - sa.Column('experiment_id', sa.Integer(), nullable=False), - sa.Column('contributor_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['contributor_id'], ['contributors.id'], ), - sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ), - sa.PrimaryKeyConstraint('experiment_id', 'contributor_id') + op.create_table( + "experiment_contributors", + sa.Column("experiment_id", sa.Integer(), nullable=False), + sa.Column("contributor_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["contributor_id"], + ["contributors.id"], + ), + sa.ForeignKeyConstraint( + ["experiment_id"], + ["experiments.id"], + ), + sa.PrimaryKeyConstraint("experiment_id", "contributor_id"), ) - op.create_table('scoreset_contributors', - sa.Column('scoreset_id', sa.Integer(), nullable=False), - sa.Column('contributor_id', sa.Integer(), nullable=False), - sa.ForeignKeyConstraint(['contributor_id'], ['contributors.id'], ), - sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ), - sa.PrimaryKeyConstraint('scoreset_id', 'contributor_id') + op.create_table( + "scoreset_contributors", + sa.Column("scoreset_id", sa.Integer(), nullable=False), + sa.Column("contributor_id", sa.Integer(), nullable=False), + sa.ForeignKeyConstraint( + ["contributor_id"], + ["contributors.id"], + ), + sa.ForeignKeyConstraint( + ["scoreset_id"], + ["scoresets.id"], + ), + sa.PrimaryKeyConstraint("scoreset_id", "contributor_id"), ) def downgrade(): - op.drop_table('scoreset_contributors') - op.drop_table('experiment_contributors') - op.drop_table('experiment_set_contributors') - op.drop_index(op.f('ix_contributors_orcid_id'), table_name='contributors') - op.drop_table('contributors') + op.drop_table("scoreset_contributors") + op.drop_table("experiment_contributors") + 
op.drop_table("experiment_set_contributors") + op.drop_index(op.f("ix_contributors_orcid_id"), table_name="contributors") + op.drop_table("contributors") diff --git a/alembic/versions/7a345f1bf9c3_unify_publication_identifier_date_fields.py b/alembic/versions/7a345f1bf9c3_unify_publication_identifier_date_fields.py index 737094ff..40437e32 100644 --- a/alembic/versions/7a345f1bf9c3_unify_publication_identifier_date_fields.py +++ b/alembic/versions/7a345f1bf9c3_unify_publication_identifier_date_fields.py @@ -5,9 +5,10 @@ Create Date: 2024-05-17 16:09:39.508434 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "7a345f1bf9c3" diff --git a/alembic/versions/886e059ad1a8_score_set_processing_errors.py b/alembic/versions/886e059ad1a8_score_set_processing_errors.py index e24395a7..dcb14361 100644 --- a/alembic/versions/886e059ad1a8_score_set_processing_errors.py +++ b/alembic/versions/886e059ad1a8_score_set_processing_errors.py @@ -5,10 +5,11 @@ Create Date: 2024-03-19 16:28:05.797406 """ -from alembic import op + import sqlalchemy as sa from sqlalchemy.dialects.postgresql import JSONB +from alembic import op # revision identifiers, used by Alembic. revision = "886e059ad1a8" diff --git a/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py b/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py index 5b5904de..0dffa120 100644 --- a/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py +++ b/alembic/versions/8bcb2b4edc60_add_foreign_key_indices.py @@ -8,7 +8,6 @@ from alembic import op - # revision identifiers, used by Alembic. revision = "8bcb2b4edc60" down_revision = "8e26f1a1160d" diff --git a/alembic/versions/8e26f1a1160d_add_is_first_login_column_to_user_model.py b/alembic/versions/8e26f1a1160d_add_is_first_login_column_to_user_model.py index 9933b1d0..6bb2187f 100644 --- a/alembic/versions/8e26f1a1160d_add_is_first_login_column_to_user_model.py +++ b/alembic/versions/8e26f1a1160d_add_is_first_login_column_to_user_model.py @@ -5,9 +5,10 @@ Create Date: 2024-04-29 11:15:07.067857 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "8e26f1a1160d" diff --git a/alembic/versions/90e7860964a2_add_target_accession.py b/alembic/versions/90e7860964a2_add_target_accession.py index a44e4f57..a4b94c17 100644 --- a/alembic/versions/90e7860964a2_add_target_accession.py +++ b/alembic/versions/90e7860964a2_add_target_accession.py @@ -5,9 +5,10 @@ Create Date: 2023-08-04 11:40:37.434740 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "90e7860964a2" diff --git a/alembic/versions/9702d32bacb3_controlled_keyword.py b/alembic/versions/9702d32bacb3_controlled_keyword.py index a032ff08..fda0fa36 100644 --- a/alembic/versions/9702d32bacb3_controlled_keyword.py +++ b/alembic/versions/9702d32bacb3_controlled_keyword.py @@ -5,9 +5,10 @@ Create Date: 2024-05-24 14:48:50.266496 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. 
revision = "9702d32bacb3" diff --git a/alembic/versions/988ca84c701b_.py b/alembic/versions/988ca84c701b_.py index f3c21c9d..5b48a95a 100644 --- a/alembic/versions/988ca84c701b_.py +++ b/alembic/versions/988ca84c701b_.py @@ -5,48 +5,46 @@ Create Date: 2023-04-10 05:19:28.099693 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. -revision = '988ca84c701b' -down_revision = '9d566d915a2c' +revision = "988ca84c701b" +down_revision = "9d566d915a2c" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.create_table('licenses', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('long_name', sa.String(), nullable=False), - sa.Column('short_name', sa.String(), nullable=False), - sa.Column('text', sa.String(), nullable=False), - sa.Column('link', sa.String(), nullable=True), - sa.Column('version', sa.String(), nullable=True), - sa.Column('creation_date', sa.Date(), nullable=False), - sa.Column('modification_date', sa.Date(), nullable=False), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('long_name'), - sa.UniqueConstraint('short_name') + op.create_table( + "licenses", + sa.Column("id", sa.Integer(), nullable=False), + sa.Column("long_name", sa.String(), nullable=False), + sa.Column("short_name", sa.String(), nullable=False), + sa.Column("text", sa.String(), nullable=False), + sa.Column("link", sa.String(), nullable=True), + sa.Column("version", sa.String(), nullable=True), + sa.Column("creation_date", sa.Date(), nullable=False), + sa.Column("modification_date", sa.Date(), nullable=False), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("long_name"), + sa.UniqueConstraint("short_name"), ) - op.create_index(op.f('ix_licenses_id'), 'licenses', ['id'], unique=False) - op.alter_column('scoresets', 'licence_id', - existing_type=sa.INTEGER(), - nullable=False) - op.create_foreign_key(None, 'scoresets', 'licenses', ['licence_id'], ['id']) - op.create_foreign_key(None, 'scoresets', 'scoresets', ['replaces_id'], ['id']) + op.create_index(op.f("ix_licenses_id"), "licenses", ["id"], unique=False) + op.alter_column("scoresets", "licence_id", existing_type=sa.INTEGER(), nullable=False) + op.create_foreign_key(None, "scoresets", "licenses", ["licence_id"], ["id"]) + op.create_foreign_key(None, "scoresets", "scoresets", ["replaces_id"], ["id"]) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### - op.drop_constraint(None, 'scoresets', type_='foreignkey') - op.drop_constraint(None, 'scoresets', type_='foreignkey') - op.alter_column('scoresets', 'licence_id', - existing_type=sa.INTEGER(), - nullable=True) - op.drop_index(op.f('ix_licenses_id'), table_name='licenses') - op.drop_table('licenses') + op.drop_constraint(None, "scoresets", type_="foreignkey") + op.drop_constraint(None, "scoresets", type_="foreignkey") + op.alter_column("scoresets", "licence_id", existing_type=sa.INTEGER(), nullable=True) + op.drop_index(op.f("ix_licenses_id"), table_name="licenses") + op.drop_table("licenses") # ### end Alembic commands ### diff --git a/alembic/versions/9d566d915a2c_.py b/alembic/versions/9d566d915a2c_.py index b8d2072b..db9e473f 100644 --- a/alembic/versions/9d566d915a2c_.py +++ b/alembic/versions/9d566d915a2c_.py @@ -1,16 +1,18 @@ """empty message Revision ID: 9d566d915a2c -Revises: +Revises: Create Date: 2022-09-04 19:01:51.002043 """ -from alembic import op + import sqlalchemy as sa from sqlalchemy.dialects import postgresql +from alembic import op + # revision identifiers, used by Alembic. -revision = '9d566d915a2c' +revision = "9d566d915a2c" down_revision = None branch_labels = None depends_on = None @@ -18,441 +20,637 @@ def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.create_table('doi_identifiers', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('identifier', sa.String(), nullable=False), - sa.Column('db_name', sa.String(), nullable=False), - sa.Column('db_version', sa.String(), nullable=True), - sa.Column('url', sa.String(), nullable=True), - sa.Column('creation_date', sa.Date(), nullable=False), - sa.Column('modification_date', sa.Date(), nullable=False), - sa.PrimaryKeyConstraint('id') - ) - op.create_index(op.f('ix_doi_identifiers_id'), 'doi_identifiers', ['id'], unique=False) - op.create_table('ensembl_identifiers', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('identifier', sa.String(), nullable=False), - sa.Column('db_name', sa.String(), nullable=False), - sa.Column('db_version', sa.String(), nullable=True), - sa.Column('url', sa.String(), nullable=True), - sa.Column('creation_date', sa.Date(), nullable=False), - sa.Column('modification_date', sa.Date(), nullable=False), - sa.PrimaryKeyConstraint('id') - ) - op.create_index(op.f('ix_ensembl_identifiers_id'), 'ensembl_identifiers', ['id'], unique=False) - op.create_table('genome_identifiers', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('identifier', sa.String(), nullable=False), - sa.Column('db_name', sa.String(), nullable=False), - sa.Column('db_version', sa.String(), nullable=True), - sa.Column('url', sa.String(), nullable=True), - sa.Column('creation_date', sa.Date(), nullable=False), - sa.Column('modification_date', sa.Date(), nullable=False), - sa.PrimaryKeyConstraint('id') - ) - op.create_index(op.f('ix_genome_identifiers_id'), 'genome_identifiers', ['id'], unique=False) - op.create_table('keywords', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('text', sa.String(), nullable=False), - sa.Column('creation_date', sa.Date(), nullable=False), - sa.Column('modification_date', sa.Date(), nullable=False), - sa.PrimaryKeyConstraint('id'), - sa.UniqueConstraint('text') - ) - op.create_index(op.f('ix_keywords_id'), 'keywords', ['id'], unique=False) - op.create_table('pubmed_identifiers', - sa.Column('id', sa.Integer(), nullable=False), - sa.Column('identifier', sa.String(), nullable=False), - sa.Column('db_name', sa.String(), 
nullable=False),
-    sa.Column('db_version', sa.String(), nullable=True),
-    sa.Column('url', sa.String(), nullable=True),
-    sa.Column('reference_html', sa.String(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_pubmed_identifiers_id'), 'pubmed_identifiers', ['id'], unique=False)
-    op.create_table('refseq_identifiers',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('identifier', sa.String(), nullable=False),
-    sa.Column('db_name', sa.String(), nullable=False),
-    sa.Column('db_version', sa.String(), nullable=True),
-    sa.Column('url', sa.String(), nullable=True),
-    sa.Column('reference_html', sa.String(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_refseq_identifiers_id'), 'refseq_identifiers', ['id'], unique=False)
-    op.create_table('roles',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('name', sa.String(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_roles_name'), 'roles', ['name'], unique=False)
-    op.create_table('sra_identifiers',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('identifier', sa.String(), nullable=False),
-    sa.Column('db_name', sa.String(), nullable=False),
-    sa.Column('db_version', sa.String(), nullable=True),
-    sa.Column('url', sa.String(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_sra_identifiers_id'), 'sra_identifiers', ['id'], unique=False)
-    op.create_table('uniprot_identifiers',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('identifier', sa.String(), nullable=False),
-    sa.Column('db_name', sa.String(), nullable=False),
-    sa.Column('db_version', sa.String(), nullable=True),
-    sa.Column('url', sa.String(), nullable=True),
-    sa.Column('reference_html', sa.String(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_uniprot_identifiers_id'), 'uniprot_identifiers', ['id'], unique=False)
-    op.create_table('users',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('username', sa.String(), nullable=False),
-    sa.Column('first_name', sa.String(), nullable=True),
-    sa.Column('last_name', sa.String(), nullable=True),
-    sa.Column('is_superuser', sa.Boolean(), nullable=False),
-    sa.Column('is_staff', sa.Boolean(), nullable=False),
-    sa.Column('is_active', sa.Boolean(), nullable=False),
-    sa.Column('date_joined', sa.DateTime(), nullable=True),
-    sa.Column('email', sa.String(), nullable=True),
-    sa.Column('last_login', sa.DateTime(), nullable=True),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
-    op.create_table('wild_type_sequences',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('sequence_type', sa.String(), nullable=False),
-    sa.Column('sequence', sa.String(), nullable=False),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_wild_type_sequences_id'), 'wild_type_sequences', ['id'], unique=False)
-    op.create_table('access_keys',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('user_id', sa.Integer(), nullable=False),
-    sa.Column('key_id', sa.String(), nullable=False),
-    sa.Column('public_key', sa.String(), nullable=False),
-    sa.Column('name', sa.String(), nullable=True),
-    sa.Column('expiration_date', sa.Date(), nullable=True),
-    sa.Column('creation_time', sa.DateTime(), nullable=True),
-    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_access_keys_key_id'), 'access_keys', ['key_id'], unique=True)
-    op.create_table('experiment_sets',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('urn', sa.String(length=64), nullable=True),
-    sa.Column('title', sa.String(), nullable=False),
-    sa.Column('method_text', sa.String(), nullable=False),
-    sa.Column('abstract_text', sa.String(), nullable=False),
-    sa.Column('short_description', sa.String(), nullable=False),
-    sa.Column('extra_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.Column('private', sa.Boolean(), nullable=False),
-    sa.Column('approved', sa.Boolean(), nullable=False),
-    sa.Column('published_date', sa.Date(), nullable=True),
-    sa.Column('processing_state', sa.String(), nullable=True),
-    sa.Column('num_experiments', sa.Integer(), nullable=False),
-    sa.Column('created_by_id', sa.Integer(), nullable=True),
-    sa.Column('modified_by_id', sa.Integer(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['created_by_id'], ['users.id'], ),
-    sa.ForeignKeyConstraint(['modified_by_id'], ['users.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_experiment_sets_id'), 'experiment_sets', ['id'], unique=False)
-    op.create_table('reference_genomes',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('short_name', sa.String(), nullable=False),
-    sa.Column('organism_name', sa.String(), nullable=False),
-    sa.Column('genome_identifier_id', sa.Integer(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['genome_identifier_id'], ['genome_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_reference_genomes_id'), 'reference_genomes', ['id'], unique=False)
-    op.create_table('users_roles',
-    sa.Column('user_id', sa.Integer(), nullable=False),
-    sa.Column('role_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
-    sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
-    sa.PrimaryKeyConstraint('user_id', 'role_id')
-    )
-    op.create_table('experiment_set_doi_identifiers',
-    sa.Column('experiment_set_id', sa.Integer(), nullable=False),
-    sa.Column('doi_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['doi_identifier_id'], ['doi_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ),
-    sa.PrimaryKeyConstraint('experiment_set_id', 'doi_identifier_id')
-    )
-    op.create_table('experiment_set_keywords',
-    sa.Column('experiment_set_id', sa.Integer(), nullable=False),
-    sa.Column('keyword_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ),
-    sa.ForeignKeyConstraint(['keyword_id'], ['keywords.id'], ),
-    sa.PrimaryKeyConstraint('experiment_set_id', 'keyword_id')
-    )
-    op.create_table('experiment_set_pubmed_identifiers',
-    sa.Column('experiment_set_id', sa.Integer(), nullable=False),
-    sa.Column('pubmed_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ),
-    sa.ForeignKeyConstraint(['pubmed_identifier_id'], ['pubmed_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('experiment_set_id', 'pubmed_identifier_id')
-    )
-    op.create_table('experiment_set_sra_identifiers',
-    sa.Column('experiment_set_id', sa.Integer(), nullable=False),
-    sa.Column('sra_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ),
-    sa.ForeignKeyConstraint(['sra_identifier_id'], ['sra_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('experiment_set_id', 'sra_identifier_id')
-    )
-    op.create_table('experiments',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('urn', sa.String(length=64), nullable=True),
-    sa.Column('title', sa.String(), nullable=False),
-    sa.Column('short_description', sa.String(), nullable=False),
-    sa.Column('abstract_text', sa.String(), nullable=False),
-    sa.Column('method_text', sa.String(), nullable=False),
-    sa.Column('extra_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.Column('private', sa.Boolean(), nullable=False),
-    sa.Column('approved', sa.Boolean(), nullable=False),
-    sa.Column('published_date', sa.Date(), nullable=True),
-    sa.Column('processing_state', sa.String(), nullable=True),
-    sa.Column('num_scoresets', sa.Integer(), nullable=False),
-    sa.Column('experiment_set_id', sa.Integer(), nullable=True),
-    sa.Column('created_by_id', sa.Integer(), nullable=True),
-    sa.Column('modified_by_id', sa.Integer(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['created_by_id'], ['users.id'], ),
-    sa.ForeignKeyConstraint(['experiment_set_id'], ['experiment_sets.id'], ),
-    sa.ForeignKeyConstraint(['modified_by_id'], ['users.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_experiments_id'), 'experiments', ['id'], unique=False)
-    op.create_table('experiment_doi_identifiers',
-    sa.Column('experiment_id', sa.Integer(), nullable=False),
-    sa.Column('doi_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['doi_identifier_id'], ['doi_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ),
-    sa.PrimaryKeyConstraint('experiment_id', 'doi_identifier_id')
-    )
-    op.create_table('experiment_keywords',
-    sa.Column('experiment_id', sa.Integer(), nullable=False),
-    sa.Column('keyword_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ),
-    sa.ForeignKeyConstraint(['keyword_id'], ['keywords.id'], ),
-    sa.PrimaryKeyConstraint('experiment_id', 'keyword_id')
-    )
-    op.create_table('experiment_pubmed_identifiers',
-    sa.Column('experiment_id', sa.Integer(), nullable=False),
-    sa.Column('pubmed_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ),
-    sa.ForeignKeyConstraint(['pubmed_identifier_id'], ['pubmed_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('experiment_id', 'pubmed_identifier_id')
-    )
-    op.create_table('experiment_sra_identifiers',
-    sa.Column('experiment_id', sa.Integer(), nullable=False),
-    sa.Column('sra_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ),
-    sa.ForeignKeyConstraint(['sra_identifier_id'], ['sra_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('experiment_id', 'sra_identifier_id')
-    )
-    op.create_table('scoresets',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('urn', sa.String(length=64), nullable=True),
-    sa.Column('title', sa.String(), nullable=False),
-    sa.Column('method_text', sa.String(), nullable=False),
-    sa.Column('abstract_text', sa.String(), nullable=False),
-    sa.Column('short_description', sa.String(), nullable=False),
-    sa.Column('extra_metadata', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.Column('dataset_columns', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.Column('normalised', sa.Boolean(), nullable=False),
-    sa.Column('private', sa.Boolean(), nullable=False),
-    sa.Column('approved', sa.Boolean(), nullable=False),
-    sa.Column('published_date', sa.Date(), nullable=True),
-    sa.Column('processing_state', sa.Enum('incomplete', 'processing', 'failed', 'success', name='processingstate', native_enum=False, create_constraint=True, length=32), nullable=True),
-    sa.Column('data_usage_policy', sa.String(), nullable=True),
-    sa.Column('num_variants', sa.Integer(), nullable=False),
-    sa.Column('experiment_id', sa.Integer(), nullable=False),
-    sa.Column('licence_id', sa.Integer(), nullable=True),
-    sa.Column('replaces_id', sa.Integer(), nullable=True),
-    sa.Column('created_by_id', sa.Integer(), nullable=True),
-    sa.Column('modified_by_id', sa.Integer(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['created_by_id'], ['users.id'], ),
-    sa.ForeignKeyConstraint(['experiment_id'], ['experiments.id'], ),
-    sa.ForeignKeyConstraint(['modified_by_id'], ['users.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_scoresets_id'), 'scoresets', ['id'], unique=False)
-    op.create_table('scoreset_doi_identifiers',
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('doi_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['doi_identifier_id'], ['doi_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.PrimaryKeyConstraint('scoreset_id', 'doi_identifier_id')
-    )
-    op.create_table('scoreset_keywords',
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('keyword_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['keyword_id'], ['keywords.id'], ),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.PrimaryKeyConstraint('scoreset_id', 'keyword_id')
-    )
-    op.create_table('scoreset_meta_analysis_sources',
-    sa.Column('source_scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('meta_analysis_scoreset_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['meta_analysis_scoreset_id'], ['scoresets.id'], ),
-    sa.ForeignKeyConstraint(['source_scoreset_id'], ['scoresets.id'], ),
-    sa.PrimaryKeyConstraint('source_scoreset_id', 'meta_analysis_scoreset_id')
-    )
-    op.create_table('scoreset_pubmed_identifiers',
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('pubmed_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['pubmed_identifier_id'], ['pubmed_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.PrimaryKeyConstraint('scoreset_id', 'pubmed_identifier_id')
-    )
-    op.create_table('scoreset_sra_identifiers',
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('sra_identifier_id', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.ForeignKeyConstraint(['sra_identifier_id'], ['sra_identifiers.id'], ),
-    sa.PrimaryKeyConstraint('scoreset_id', 'sra_identifier_id')
-    )
-    op.create_table('target_genes',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('name', sa.String(), nullable=False),
-    sa.Column('category', sa.String(), nullable=False),
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('wt_sequence_id', sa.Integer(), nullable=False),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.ForeignKeyConstraint(['wt_sequence_id'], ['wild_type_sequences.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_target_genes_id'), 'target_genes', ['id'], unique=False)
-    op.create_table('variants',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('urn', sa.String(length=64), nullable=True),
-    sa.Column('data', postgresql.JSONB(astext_type=sa.Text()), nullable=False),
-    sa.Column('scoreset_id', sa.Integer(), nullable=False),
-    sa.Column('hgvs_nt', sa.String(), nullable=True),
-    sa.Column('hgvs_pro', sa.String(), nullable=True),
-    sa.Column('hgvs_splice', sa.String(), nullable=True),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['scoreset_id'], ['scoresets.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_variants_id'), 'variants', ['id'], unique=False)
-    op.create_table('ensembl_offsets',
-    sa.Column('identifier_id', sa.Integer(), nullable=False),
-    sa.Column('target_gene_id', sa.Integer(), nullable=False),
-    sa.Column('offset', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['identifier_id'], ['ensembl_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['target_gene_id'], ['target_genes.id'], ),
-    sa.PrimaryKeyConstraint('identifier_id', 'target_gene_id')
-    )
-    op.create_table('reference_maps',
-    sa.Column('id', sa.Integer(), nullable=False),
-    sa.Column('is_primary', sa.Boolean(), nullable=False),
-    sa.Column('genome_id', sa.Integer(), nullable=False),
-    sa.Column('target_id', sa.Integer(), nullable=False),
-    sa.Column('creation_date', sa.Date(), nullable=False),
-    sa.Column('modification_date', sa.Date(), nullable=False),
-    sa.ForeignKeyConstraint(['genome_id'], ['reference_genomes.id'], ),
-    sa.ForeignKeyConstraint(['target_id'], ['target_genes.id'], ),
-    sa.PrimaryKeyConstraint('id')
-    )
-    op.create_index(op.f('ix_reference_maps_id'), 'reference_maps', ['id'], unique=False)
-    op.create_table('refseq_offsets',
-    sa.Column('identifier_id', sa.Integer(), nullable=False),
-    sa.Column('target_gene_id', sa.Integer(), nullable=False),
-    sa.Column('offset', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['identifier_id'], ['refseq_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['target_gene_id'], ['target_genes.id'], ),
-    sa.PrimaryKeyConstraint('identifier_id', 'target_gene_id')
-    )
-    op.create_table('uniprot_offsets',
-    sa.Column('identifier_id', sa.Integer(), nullable=False),
-    sa.Column('target_gene_id', sa.Integer(), nullable=False),
-    sa.Column('offset', sa.Integer(), nullable=False),
-    sa.ForeignKeyConstraint(['identifier_id'], ['uniprot_identifiers.id'], ),
-    sa.ForeignKeyConstraint(['target_gene_id'], ['target_genes.id'], ),
-    sa.PrimaryKeyConstraint('identifier_id', 'target_gene_id')
+    op.create_table(
+        "doi_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_doi_identifiers_id"), "doi_identifiers", ["id"], unique=False)
+    op.create_table(
+        "ensembl_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_ensembl_identifiers_id"), "ensembl_identifiers", ["id"], unique=False)
+    op.create_table(
+        "genome_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_genome_identifiers_id"), "genome_identifiers", ["id"], unique=False)
+    op.create_table(
+        "keywords",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("text", sa.String(), nullable=False),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+        sa.UniqueConstraint("text"),
+    )
+    op.create_index(op.f("ix_keywords_id"), "keywords", ["id"], unique=False)
+    op.create_table(
+        "pubmed_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("reference_html", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_pubmed_identifiers_id"), "pubmed_identifiers", ["id"], unique=False)
+    op.create_table(
+        "refseq_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("reference_html", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_refseq_identifiers_id"), "refseq_identifiers", ["id"], unique=False)
+    op.create_table(
+        "roles",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("name", sa.String(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_roles_name"), "roles", ["name"], unique=False)
+    op.create_table(
+        "sra_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_sra_identifiers_id"), "sra_identifiers", ["id"], unique=False)
+    op.create_table(
+        "uniprot_identifiers",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("identifier", sa.String(), nullable=False),
+        sa.Column("db_name", sa.String(), nullable=False),
+        sa.Column("db_version", sa.String(), nullable=True),
+        sa.Column("url", sa.String(), nullable=True),
+        sa.Column("reference_html", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_uniprot_identifiers_id"), "uniprot_identifiers", ["id"], unique=False)
+    op.create_table(
+        "users",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("username", sa.String(), nullable=False),
+        sa.Column("first_name", sa.String(), nullable=True),
+        sa.Column("last_name", sa.String(), nullable=True),
+        sa.Column("is_superuser", sa.Boolean(), nullable=False),
+        sa.Column("is_staff", sa.Boolean(), nullable=False),
+        sa.Column("is_active", sa.Boolean(), nullable=False),
+        sa.Column("date_joined", sa.DateTime(), nullable=True),
+        sa.Column("email", sa.String(), nullable=True),
+        sa.Column("last_login", sa.DateTime(), nullable=True),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_users_username"), "users", ["username"], unique=False)
+    op.create_table(
+        "wild_type_sequences",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("sequence_type", sa.String(), nullable=False),
+        sa.Column("sequence", sa.String(), nullable=False),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_wild_type_sequences_id"), "wild_type_sequences", ["id"], unique=False)
+    op.create_table(
+        "access_keys",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("user_id", sa.Integer(), nullable=False),
+        sa.Column("key_id", sa.String(), nullable=False),
+        sa.Column("public_key", sa.String(), nullable=False),
+        sa.Column("name", sa.String(), nullable=True),
+        sa.Column("expiration_date", sa.Date(), nullable=True),
+        sa.Column("creation_time", sa.DateTime(), nullable=True),
+        sa.ForeignKeyConstraint(
+            ["user_id"],
+            ["users.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_access_keys_key_id"), "access_keys", ["key_id"], unique=True)
+    op.create_table(
+        "experiment_sets",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("urn", sa.String(length=64), nullable=True),
+        sa.Column("title", sa.String(), nullable=False),
+        sa.Column("method_text", sa.String(), nullable=False),
+        sa.Column("abstract_text", sa.String(), nullable=False),
+        sa.Column("short_description", sa.String(), nullable=False),
+        sa.Column("extra_metadata", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+        sa.Column("private", sa.Boolean(), nullable=False),
+        sa.Column("approved", sa.Boolean(), nullable=False),
+        sa.Column("published_date", sa.Date(), nullable=True),
+        sa.Column("processing_state", sa.String(), nullable=True),
+        sa.Column("num_experiments", sa.Integer(), nullable=False),
+        sa.Column("created_by_id", sa.Integer(), nullable=True),
+        sa.Column("modified_by_id", sa.Integer(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["created_by_id"],
+            ["users.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["modified_by_id"],
+            ["users.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_experiment_sets_id"), "experiment_sets", ["id"], unique=False)
+    op.create_table(
+        "reference_genomes",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("short_name", sa.String(), nullable=False),
+        sa.Column("organism_name", sa.String(), nullable=False),
+        sa.Column("genome_identifier_id", sa.Integer(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["genome_identifier_id"],
+            ["genome_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_reference_genomes_id"), "reference_genomes", ["id"], unique=False)
+    op.create_table(
+        "users_roles",
+        sa.Column("user_id", sa.Integer(), nullable=False),
+        sa.Column("role_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["role_id"],
+            ["roles.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["user_id"],
+            ["users.id"],
+        ),
+        sa.PrimaryKeyConstraint("user_id", "role_id"),
+    )
+    op.create_table(
+        "experiment_set_doi_identifiers",
+        sa.Column("experiment_set_id", sa.Integer(), nullable=False),
+        sa.Column("doi_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["doi_identifier_id"],
+            ["doi_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["experiment_set_id"],
+            ["experiment_sets.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_set_id", "doi_identifier_id"),
+    )
+    op.create_table(
+        "experiment_set_keywords",
+        sa.Column("experiment_set_id", sa.Integer(), nullable=False),
+        sa.Column("keyword_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_set_id"],
+            ["experiment_sets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["keyword_id"],
+            ["keywords.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_set_id", "keyword_id"),
+    )
+    op.create_table(
+        "experiment_set_pubmed_identifiers",
+        sa.Column("experiment_set_id", sa.Integer(), nullable=False),
+        sa.Column("pubmed_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_set_id"],
+            ["experiment_sets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["pubmed_identifier_id"],
+            ["pubmed_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_set_id", "pubmed_identifier_id"),
+    )
+    op.create_table(
+        "experiment_set_sra_identifiers",
+        sa.Column("experiment_set_id", sa.Integer(), nullable=False),
+        sa.Column("sra_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_set_id"],
+            ["experiment_sets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["sra_identifier_id"],
+            ["sra_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_set_id", "sra_identifier_id"),
+    )
+    op.create_table(
+        "experiments",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("urn", sa.String(length=64), nullable=True),
+        sa.Column("title", sa.String(), nullable=False),
+        sa.Column("short_description", sa.String(), nullable=False),
+        sa.Column("abstract_text", sa.String(), nullable=False),
+        sa.Column("method_text", sa.String(), nullable=False),
+        sa.Column("extra_metadata", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+        sa.Column("private", sa.Boolean(), nullable=False),
+        sa.Column("approved", sa.Boolean(), nullable=False),
+        sa.Column("published_date", sa.Date(), nullable=True),
+        sa.Column("processing_state", sa.String(), nullable=True),
+        sa.Column("num_scoresets", sa.Integer(), nullable=False),
+        sa.Column("experiment_set_id", sa.Integer(), nullable=True),
+        sa.Column("created_by_id", sa.Integer(), nullable=True),
+        sa.Column("modified_by_id", sa.Integer(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["created_by_id"],
+            ["users.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["experiment_set_id"],
+            ["experiment_sets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["modified_by_id"],
+            ["users.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_experiments_id"), "experiments", ["id"], unique=False)
+    op.create_table(
+        "experiment_doi_identifiers",
+        sa.Column("experiment_id", sa.Integer(), nullable=False),
+        sa.Column("doi_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["doi_identifier_id"],
+            ["doi_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["experiment_id"],
+            ["experiments.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_id", "doi_identifier_id"),
+    )
+    op.create_table(
+        "experiment_keywords",
+        sa.Column("experiment_id", sa.Integer(), nullable=False),
+        sa.Column("keyword_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_id"],
+            ["experiments.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["keyword_id"],
+            ["keywords.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_id", "keyword_id"),
+    )
+    op.create_table(
+        "experiment_pubmed_identifiers",
+        sa.Column("experiment_id", sa.Integer(), nullable=False),
+        sa.Column("pubmed_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_id"],
+            ["experiments.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["pubmed_identifier_id"],
+            ["pubmed_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_id", "pubmed_identifier_id"),
+    )
+    op.create_table(
+        "experiment_sra_identifiers",
+        sa.Column("experiment_id", sa.Integer(), nullable=False),
+        sa.Column("sra_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["experiment_id"],
+            ["experiments.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["sra_identifier_id"],
+            ["sra_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("experiment_id", "sra_identifier_id"),
+    )
+    op.create_table(
+        "scoresets",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("urn", sa.String(length=64), nullable=True),
+        sa.Column("title", sa.String(), nullable=False),
+        sa.Column("method_text", sa.String(), nullable=False),
+        sa.Column("abstract_text", sa.String(), nullable=False),
+        sa.Column("short_description", sa.String(), nullable=False),
+        sa.Column("extra_metadata", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+        sa.Column("dataset_columns", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+        sa.Column("normalised", sa.Boolean(), nullable=False),
+        sa.Column("private", sa.Boolean(), nullable=False),
+        sa.Column("approved", sa.Boolean(), nullable=False),
+        sa.Column("published_date", sa.Date(), nullable=True),
+        sa.Column(
+            "processing_state",
+            sa.Enum(
+                "incomplete",
+                "processing",
+                "failed",
+                "success",
+                name="processingstate",
+                native_enum=False,
+                create_constraint=True,
+                length=32,
+            ),
+            nullable=True,
+        ),
+        sa.Column("data_usage_policy", sa.String(), nullable=True),
+        sa.Column("num_variants", sa.Integer(), nullable=False),
+        sa.Column("experiment_id", sa.Integer(), nullable=False),
+        sa.Column("licence_id", sa.Integer(), nullable=True),
+        sa.Column("replaces_id", sa.Integer(), nullable=True),
+        sa.Column("created_by_id", sa.Integer(), nullable=True),
+        sa.Column("modified_by_id", sa.Integer(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["created_by_id"],
+            ["users.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["experiment_id"],
+            ["experiments.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["modified_by_id"],
+            ["users.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_scoresets_id"), "scoresets", ["id"], unique=False)
+    op.create_table(
+        "scoreset_doi_identifiers",
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("doi_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["doi_identifier_id"],
+            ["doi_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.PrimaryKeyConstraint("scoreset_id", "doi_identifier_id"),
+    )
+    op.create_table(
+        "scoreset_keywords",
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("keyword_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["keyword_id"],
+            ["keywords.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.PrimaryKeyConstraint("scoreset_id", "keyword_id"),
+    )
+    op.create_table(
+        "scoreset_meta_analysis_sources",
+        sa.Column("source_scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("meta_analysis_scoreset_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["meta_analysis_scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["source_scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.PrimaryKeyConstraint("source_scoreset_id", "meta_analysis_scoreset_id"),
+    )
+    op.create_table(
+        "scoreset_pubmed_identifiers",
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("pubmed_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["pubmed_identifier_id"],
+            ["pubmed_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.PrimaryKeyConstraint("scoreset_id", "pubmed_identifier_id"),
+    )
+    op.create_table(
+        "scoreset_sra_identifiers",
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("sra_identifier_id", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["sra_identifier_id"],
+            ["sra_identifiers.id"],
+        ),
+        sa.PrimaryKeyConstraint("scoreset_id", "sra_identifier_id"),
+    )
+    op.create_table(
+        "target_genes",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("name", sa.String(), nullable=False),
+        sa.Column("category", sa.String(), nullable=False),
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("wt_sequence_id", sa.Integer(), nullable=False),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["wt_sequence_id"],
+            ["wild_type_sequences.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_target_genes_id"), "target_genes", ["id"], unique=False)
+    op.create_table(
+        "variants",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("urn", sa.String(length=64), nullable=True),
+        sa.Column("data", postgresql.JSONB(astext_type=sa.Text()), nullable=False),
+        sa.Column("scoreset_id", sa.Integer(), nullable=False),
+        sa.Column("hgvs_nt", sa.String(), nullable=True),
+        sa.Column("hgvs_pro", sa.String(), nullable=True),
+        sa.Column("hgvs_splice", sa.String(), nullable=True),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["scoreset_id"],
+            ["scoresets.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_variants_id"), "variants", ["id"], unique=False)
+    op.create_table(
+        "ensembl_offsets",
+        sa.Column("identifier_id", sa.Integer(), nullable=False),
+        sa.Column("target_gene_id", sa.Integer(), nullable=False),
+        sa.Column("offset", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["identifier_id"],
+            ["ensembl_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["target_gene_id"],
+            ["target_genes.id"],
+        ),
+        sa.PrimaryKeyConstraint("identifier_id", "target_gene_id"),
+    )
+    op.create_table(
+        "reference_maps",
+        sa.Column("id", sa.Integer(), nullable=False),
+        sa.Column("is_primary", sa.Boolean(), nullable=False),
+        sa.Column("genome_id", sa.Integer(), nullable=False),
+        sa.Column("target_id", sa.Integer(), nullable=False),
+        sa.Column("creation_date", sa.Date(), nullable=False),
+        sa.Column("modification_date", sa.Date(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["genome_id"],
+            ["reference_genomes.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["target_id"],
+            ["target_genes.id"],
+        ),
+        sa.PrimaryKeyConstraint("id"),
+    )
+    op.create_index(op.f("ix_reference_maps_id"), "reference_maps", ["id"], unique=False)
+    op.create_table(
+        "refseq_offsets",
+        sa.Column("identifier_id", sa.Integer(), nullable=False),
+        sa.Column("target_gene_id", sa.Integer(), nullable=False),
+        sa.Column("offset", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["identifier_id"],
+            ["refseq_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["target_gene_id"],
+            ["target_genes.id"],
+        ),
+        sa.PrimaryKeyConstraint("identifier_id", "target_gene_id"),
+    )
+    op.create_table(
+        "uniprot_offsets",
+        sa.Column("identifier_id", sa.Integer(), nullable=False),
+        sa.Column("target_gene_id", sa.Integer(), nullable=False),
+        sa.Column("offset", sa.Integer(), nullable=False),
+        sa.ForeignKeyConstraint(
+            ["identifier_id"],
+            ["uniprot_identifiers.id"],
+        ),
+        sa.ForeignKeyConstraint(
+            ["target_gene_id"],
+            ["target_genes.id"],
+        ),
+        sa.PrimaryKeyConstraint("identifier_id", "target_gene_id"),
     )
     # ### end Alembic commands ###
 
 
 def downgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.drop_table('uniprot_offsets')
-    op.drop_table('refseq_offsets')
-    op.drop_index(op.f('ix_reference_maps_id'), table_name='reference_maps')
-    op.drop_table('reference_maps')
-    op.drop_table('ensembl_offsets')
-    op.drop_index(op.f('ix_variants_id'), table_name='variants')
-    op.drop_table('variants')
-    op.drop_index(op.f('ix_target_genes_id'), table_name='target_genes')
-    op.drop_table('target_genes')
-    op.drop_table('scoreset_sra_identifiers')
-    op.drop_table('scoreset_pubmed_identifiers')
-    op.drop_table('scoreset_meta_analysis_sources')
-    op.drop_table('scoreset_keywords')
-    op.drop_table('scoreset_doi_identifiers')
-    op.drop_index(op.f('ix_scoresets_id'), table_name='scoresets')
-    op.drop_table('scoresets')
-    op.drop_table('experiment_sra_identifiers')
-    op.drop_table('experiment_pubmed_identifiers')
-    op.drop_table('experiment_keywords')
-    op.drop_table('experiment_doi_identifiers')
-    op.drop_index(op.f('ix_experiments_id'), table_name='experiments')
-    op.drop_table('experiments')
-    op.drop_table('experiment_set_sra_identifiers')
-    op.drop_table('experiment_set_pubmed_identifiers')
-    op.drop_table('experiment_set_keywords')
-    op.drop_table('experiment_set_doi_identifiers')
-    op.drop_table('users_roles')
-    op.drop_index(op.f('ix_reference_genomes_id'), table_name='reference_genomes')
-    op.drop_table('reference_genomes')
-    op.drop_index(op.f('ix_experiment_sets_id'), table_name='experiment_sets')
-    op.drop_table('experiment_sets')
-    op.drop_index(op.f('ix_access_keys_key_id'), table_name='access_keys')
-    op.drop_table('access_keys')
-    op.drop_index(op.f('ix_wild_type_sequences_id'), table_name='wild_type_sequences')
-    op.drop_table('wild_type_sequences')
-    op.drop_index(op.f('ix_users_username'), table_name='users')
-    op.drop_table('users')
-    op.drop_index(op.f('ix_uniprot_identifiers_id'), table_name='uniprot_identifiers')
-    op.drop_table('uniprot_identifiers')
-    op.drop_index(op.f('ix_sra_identifiers_id'), table_name='sra_identifiers')
-    op.drop_table('sra_identifiers')
-    op.drop_index(op.f('ix_roles_name'), table_name='roles')
-    op.drop_table('roles')
-    op.drop_index(op.f('ix_refseq_identifiers_id'), table_name='refseq_identifiers')
-    op.drop_table('refseq_identifiers')
-    op.drop_index(op.f('ix_pubmed_identifiers_id'), table_name='pubmed_identifiers')
-    op.drop_table('pubmed_identifiers')
-    op.drop_index(op.f('ix_keywords_id'), table_name='keywords')
-    op.drop_table('keywords')
-    op.drop_index(op.f('ix_genome_identifiers_id'), table_name='genome_identifiers')
-    op.drop_table('genome_identifiers')
-    op.drop_index(op.f('ix_ensembl_identifiers_id'), table_name='ensembl_identifiers')
-    op.drop_table('ensembl_identifiers')
-    op.drop_index(op.f('ix_doi_identifiers_id'), table_name='doi_identifiers')
-    op.drop_table('doi_identifiers')
+    op.drop_table("uniprot_offsets")
+    op.drop_table("refseq_offsets")
+    op.drop_index(op.f("ix_reference_maps_id"), table_name="reference_maps")
+    op.drop_table("reference_maps")
+    op.drop_table("ensembl_offsets")
+    op.drop_index(op.f("ix_variants_id"), table_name="variants")
+    op.drop_table("variants")
+    op.drop_index(op.f("ix_target_genes_id"), table_name="target_genes")
+    op.drop_table("target_genes")
+    op.drop_table("scoreset_sra_identifiers")
+    op.drop_table("scoreset_pubmed_identifiers")
+    op.drop_table("scoreset_meta_analysis_sources")
+    op.drop_table("scoreset_keywords")
+    op.drop_table("scoreset_doi_identifiers")
+    op.drop_index(op.f("ix_scoresets_id"), table_name="scoresets")
+    op.drop_table("scoresets")
+    op.drop_table("experiment_sra_identifiers")
+    op.drop_table("experiment_pubmed_identifiers")
+    op.drop_table("experiment_keywords")
+    op.drop_table("experiment_doi_identifiers")
+    op.drop_index(op.f("ix_experiments_id"), table_name="experiments")
+    op.drop_table("experiments")
+    op.drop_table("experiment_set_sra_identifiers")
+    op.drop_table("experiment_set_pubmed_identifiers")
+    op.drop_table("experiment_set_keywords")
+    op.drop_table("experiment_set_doi_identifiers")
+    op.drop_table("users_roles")
+    op.drop_index(op.f("ix_reference_genomes_id"), table_name="reference_genomes")
+    op.drop_table("reference_genomes")
+    op.drop_index(op.f("ix_experiment_sets_id"), table_name="experiment_sets")
+    op.drop_table("experiment_sets")
+    op.drop_index(op.f("ix_access_keys_key_id"), table_name="access_keys")
+    op.drop_table("access_keys")
+    op.drop_index(op.f("ix_wild_type_sequences_id"), table_name="wild_type_sequences")
+    op.drop_table("wild_type_sequences")
+    op.drop_index(op.f("ix_users_username"), table_name="users")
+    op.drop_table("users")
+    op.drop_index(op.f("ix_uniprot_identifiers_id"), table_name="uniprot_identifiers")
+    op.drop_table("uniprot_identifiers")
+    op.drop_index(op.f("ix_sra_identifiers_id"), table_name="sra_identifiers")
+    op.drop_table("sra_identifiers")
+    op.drop_index(op.f("ix_roles_name"), table_name="roles")
+    op.drop_table("roles")
+    op.drop_index(op.f("ix_refseq_identifiers_id"), table_name="refseq_identifiers")
+    op.drop_table("refseq_identifiers")
+    op.drop_index(op.f("ix_pubmed_identifiers_id"), table_name="pubmed_identifiers")
+    op.drop_table("pubmed_identifiers")
+    op.drop_index(op.f("ix_keywords_id"), table_name="keywords")
+    op.drop_table("keywords")
+    op.drop_index(op.f("ix_genome_identifiers_id"), table_name="genome_identifiers")
+    op.drop_table("genome_identifiers")
+    op.drop_index(op.f("ix_ensembl_identifiers_id"), table_name="ensembl_identifiers")
+    op.drop_table("ensembl_identifiers")
+    op.drop_index(op.f("ix_doi_identifiers_id"), table_name="doi_identifiers")
+    op.drop_table("doi_identifiers")
     # ### end Alembic commands ###
diff --git a/alembic/versions/c6154dd7d9b9_add_gene_name_column_to_target_.py b/alembic/versions/c6154dd7d9b9_add_gene_name_column_to_target_.py
index 1c4937e3..4c5d55d6 100644
--- a/alembic/versions/c6154dd7d9b9_add_gene_name_column_to_target_.py
+++ b/alembic/versions/c6154dd7d9b9_add_gene_name_column_to_target_.py
@@ -5,9 +5,10 @@
 Create Date: 2023-10-04 17:00:42.960917
 
 """
-from alembic import op
+
 import sqlalchemy as sa
 
+from alembic import op
 
 # revision identifiers, used by Alembic.
 revision = "c6154dd7d9b9"
diff --git a/alembic/versions/d7e6f8c3b9dc_scoreset_mapping_columns.py b/alembic/versions/d7e6f8c3b9dc_scoreset_mapping_columns.py
index 51f68187..c55c1d5a 100644
--- a/alembic/versions/d7e6f8c3b9dc_scoreset_mapping_columns.py
+++ b/alembic/versions/d7e6f8c3b9dc_scoreset_mapping_columns.py
@@ -6,10 +6,10 @@
 
 """
 
-from alembic import op
-from sqlalchemy.dialects import postgresql
 import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
 
+from alembic import op
 
 # revision identifiers, used by Alembic.
revision = "d7e6f8c3b9dc" @@ -34,7 +34,7 @@ def upgrade(): name="mappingstate", native_enum=False, create_constraint=True, - length=32 + length=32, ), nullable=True, ), diff --git a/alembic/versions/da9ba478647d_add_primary_publication.py b/alembic/versions/da9ba478647d_add_primary_publication.py index e306bb1d..052f2a7a 100644 --- a/alembic/versions/da9ba478647d_add_primary_publication.py +++ b/alembic/versions/da9ba478647d_add_primary_publication.py @@ -5,9 +5,10 @@ Create Date: 2023-05-10 16:45:22.869575 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. revision = "da9ba478647d" diff --git a/alembic/versions/ec5d2787bec9_external_links.py b/alembic/versions/ec5d2787bec9_external_links.py index 45ab3f50..5df41872 100644 --- a/alembic/versions/ec5d2787bec9_external_links.py +++ b/alembic/versions/ec5d2787bec9_external_links.py @@ -5,19 +5,24 @@ Create Date: 2024-05-29 06:39:17.930675 """ -from alembic import op + import sqlalchemy as sa from sqlalchemy.dialects import postgresql +from alembic import op + # revision identifiers, used by Alembic. -revision = 'ec5d2787bec9' -down_revision = '7a345f1bf9c3' +revision = "ec5d2787bec9" +down_revision = "7a345f1bf9c3" branch_labels = None depends_on = None def upgrade(): - op.add_column("scoresets", sa.Column("external_links", postgresql.JSONB(astext_type=sa.Text()), nullable=False, server_default='{}')) + op.add_column( + "scoresets", + sa.Column("external_links", postgresql.JSONB(astext_type=sa.Text()), nullable=False, server_default="{}"), + ) def downgrade(): diff --git a/alembic/versions/f11fd758436e_.py b/alembic/versions/f11fd758436e_.py index b756dd94..c1be17ab 100644 --- a/alembic/versions/f11fd758436e_.py +++ b/alembic/versions/f11fd758436e_.py @@ -5,30 +5,31 @@ Create Date: 2023-05-16 15:03:22.252545 """ -from alembic import op + import sqlalchemy as sa +from alembic import op # revision identifiers, used by Alembic. -revision = 'f11fd758436e' -down_revision = '988ca84c701b' +revision = "f11fd758436e" +down_revision = "988ca84c701b" branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### - op.drop_column('experiment_sets', 'method_text') - op.drop_column('experiment_sets', 'short_description') - op.drop_column('experiment_sets', 'title') - op.drop_column('experiment_sets', 'abstract_text') + op.drop_column("experiment_sets", "method_text") + op.drop_column("experiment_sets", "short_description") + op.drop_column("experiment_sets", "title") + op.drop_column("experiment_sets", "abstract_text") # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! 
### - op.add_column('experiment_sets', sa.Column('abstract_text', sa.VARCHAR(), autoincrement=False, nullable=False)) - op.add_column('experiment_sets', sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=False)) - op.add_column('experiment_sets', sa.Column('short_description', sa.VARCHAR(), autoincrement=False, nullable=False)) - op.add_column('experiment_sets', sa.Column('method_text', sa.VARCHAR(), autoincrement=False, nullable=False)) + op.add_column("experiment_sets", sa.Column("abstract_text", sa.VARCHAR(), autoincrement=False, nullable=False)) + op.add_column("experiment_sets", sa.Column("title", sa.VARCHAR(), autoincrement=False, nullable=False)) + op.add_column("experiment_sets", sa.Column("short_description", sa.VARCHAR(), autoincrement=False, nullable=False)) + op.add_column("experiment_sets", sa.Column("method_text", sa.VARCHAR(), autoincrement=False, nullable=False)) # ### end Alembic commands ### diff --git a/alembic/versions/f36cf612e029_additional_mapping_columns.py b/alembic/versions/f36cf612e029_additional_mapping_columns.py index 0141bc2a..5b8c896e 100644 --- a/alembic/versions/f36cf612e029_additional_mapping_columns.py +++ b/alembic/versions/f36cf612e029_additional_mapping_columns.py @@ -6,11 +6,12 @@ """ -from alembic import op +from datetime import datetime + import sqlalchemy as sa from sqlalchemy.dialects import postgresql -from datetime import datetime +from alembic import op # revision identifiers, used by Alembic. revision = "f36cf612e029" @@ -32,7 +33,7 @@ def upgrade(): "modification_date", sa.Date(), nullable=False, - server_default = sa.func.current_date(), + server_default=sa.func.current_date(), ), ) op.add_column( @@ -51,7 +52,7 @@ def upgrade(): op.add_column( "mapped_variants", sa.Column("current", sa.Boolean(), nullable=False, server_default=sa.false()), - ) + ) op.alter_column( "mapped_variants", "pre_mapped", diff --git a/alembic/versions/fecb3e0d181d_make_urns_unique.py b/alembic/versions/fecb3e0d181d_make_urns_unique.py index 83734b6c..ede6a65e 100644 --- a/alembic/versions/fecb3e0d181d_make_urns_unique.py +++ b/alembic/versions/fecb3e0d181d_make_urns_unique.py @@ -8,7 +8,6 @@ from alembic import op - # revision identifiers, used by Alembic. revision = "fecb3e0d181d" down_revision = "c6154dd7d9b9" diff --git a/mypy_stubs/bioutils/assemblies.pyi b/mypy_stubs/bioutils/assemblies.pyi index 5d0eae79..1c1a5f60 100644 --- a/mypy_stubs/bioutils/assemblies.pyi +++ b/mypy_stubs/bioutils/assemblies.pyi @@ -1,8 +1,7 @@ from typing import Union - def get_assembly_names() -> list[str]: ... def get_assembly(name: str) -> dict[str, Union[str, int, list[str]]]: ... def get_assemblies(names: list[str] = []) -> dict[str, dict[str, Union[str, int, list[str]]]]: ... -def make_name_ac_map(assy_name : str, primary_only: bool = False) -> dict[str, str]: ... -def make_ac_name_map(assy_name : str, primary_only: bool = False) -> dict[str, str]: ... +def make_name_ac_map(assy_name: str, primary_only: bool = False) -> dict[str, str]: ... +def make_ac_name_map(assy_name: str, primary_only: bool = False) -> dict[str, str]: ... 
diff --git a/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi b/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
index df5f84bb..3df0a0ec 100644
--- a/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
+++ b/mypy_stubs/cdot/hgvs/dataproviders/fasta_seqfetcher.pyi
@@ -1,4 +1,5 @@
 from typing import Union
+
 from hgvs.dataproviders.seqfetcher import SeqFetcher
 
 class FastaSeqFetcher:
@@ -7,4 +8,3 @@ class FastaSeqFetcher:
 class ChainedSeqFetcher:
     seq_fetchers: list[Union[SeqFetcher, FastaSeqFetcher]]
     def __init__(self, *args) -> None: ...
-
diff --git a/mypy_stubs/cdot/hgvs/dataproviders/json_data_provider.pyi b/mypy_stubs/cdot/hgvs/dataproviders/json_data_provider.pyi
index c62e62ed..2e67a3db 100644
--- a/mypy_stubs/cdot/hgvs/dataproviders/json_data_provider.pyi
+++ b/mypy_stubs/cdot/hgvs/dataproviders/json_data_provider.pyi
@@ -1,11 +1,11 @@
 import abc
+from typing import List, Optional, Union
+
 from _typeshed import Incomplete
 from cdot.assembly_helper import get_ac_name_map as get_ac_name_map
+from cdot.hgvs.dataproviders import ChainedSeqFetcher, FastaSeqFetcher
 from hgvs.dataproviders.interface import Interface
-from typing import List, Optional, Union
 from hgvs.dataproviders.seqfetcher import SeqFetcher
-from cdot.hgvs.dataproviders import ChainedSeqFetcher, FastaSeqFetcher
-
 
 class AbstractJSONDataProvider(Interface, metaclass=abc.ABCMeta):
     NCBI_ALN_METHOD: str
@@ -13,7 +13,13 @@ class AbstractJSONDataProvider(Interface, metaclass=abc.ABCMeta):
     seqfetcher: Incomplete
     assembly_maps: Incomplete
    assembly_by_contig: Incomplete
-    def __init__(self, assemblies: Optional[List[str]] = None, mode: Optional[str] = None, cache: Optional[str] = None, seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None) -> None: ...
+    def __init__(
+        self,
+        assemblies: Optional[List[str]] = None,
+        mode: Optional[str] = None,
+        cache: Optional[str] = None,
+        seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None,
+    ) -> None: ...
     def data_version(self) -> str: ...
     def schema_version(self) -> str: ...
     def get_assembly_map(self, assembly_name) -> dict[str, str]: ...
@@ -38,11 +44,24 @@ class LocalDataProvider(AbstractJSONDataProvider, metaclass=abc.ABCMeta):
     def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i): ...
 
 class JSONDataProvider(LocalDataProvider):
-    def __init__(self, file_or_filename_list, mode: Incomplete | None = None, cache: Incomplete | None = None, seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None) -> None: ...
+    def __init__(
+        self,
+        file_or_filename_list,
+        mode: Incomplete | None = None,
+        cache: Incomplete | None = None,
+        seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None,
+    ) -> None: ...
     def get_pro_ac_for_tx_ac(self, tx_ac): ...
     def get_gene_info(self, gene): ...
 
 class RESTDataProvider(AbstractJSONDataProvider):
-    def __init__(self, url: Optional[str] = None, secure: bool = True, mode: Optional[str] = None, cache: Optional[str] = None, seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None) -> None: ...
+    def __init__(
+        self,
+        url: Optional[str] = None,
+        secure: bool = True,
+        mode: Optional[str] = None,
+        cache: Optional[str] = None,
+        seqfetcher: Union[SeqFetcher, ChainedSeqFetcher, FastaSeqFetcher, None] = None,
+    ) -> None: ...
     def get_tx_for_gene(self, gene_name): ...
     def get_tx_for_region(self, alt_ac, alt_aln_method, start_i, end_i): ...
diff --git a/mypy_stubs/hgvs/dataproviders/interface.pyi b/mypy_stubs/hgvs/dataproviders/interface.pyi
index ee441428..223cdd32 100644
--- a/mypy_stubs/hgvs/dataproviders/interface.pyi
+++ b/mypy_stubs/hgvs/dataproviders/interface.pyi
@@ -1,6 +1,4 @@
 from typing import Optional
 
-
 class Interface:
-
     def __init__(self, mode: Optional[str] = None, cache: Optional[str] = None): ...
diff --git a/mypy_stubs/hgvs/dataproviders/seqfetcher.pyi b/mypy_stubs/hgvs/dataproviders/seqfetcher.pyi
index a43d5045..35f8cf5f 100644
--- a/mypy_stubs/hgvs/dataproviders/seqfetcher.pyi
+++ b/mypy_stubs/hgvs/dataproviders/seqfetcher.pyi
@@ -1,3 +1,2 @@
 class SeqFetcher(object):
-
     def __init__(self): ...
diff --git a/mypy_stubs/hgvs/dataproviders/uta.pyi b/mypy_stubs/hgvs/dataproviders/uta.pyi
index 3224ac27..ace9b7b1 100644
--- a/mypy_stubs/hgvs/dataproviders/uta.pyi
+++ b/mypy_stubs/hgvs/dataproviders/uta.pyi
@@ -1,9 +1,14 @@
 from typing import Any, Optional
-import hgvs
 
+import hgvs
 
 class UTABase:
     pass
 
-
-def connect(db_url: Optional[str] = None, pooling: bool = hgvs.global_config.uta.pooling, application_name: Optional[str] = None, mode: Any = None, cache: Any = None) -> UTABase: ...
+def connect(
+    db_url: Optional[str] = None,
+    pooling: bool = hgvs.global_config.uta.pooling,
+    application_name: Optional[str] = None,
+    mode: Any = None,
+    cache: Any = None,
+) -> UTABase: ...
diff --git a/mypy_stubs/hgvs/exceptions.pyi b/mypy_stubs/hgvs/exceptions.pyi
index 70d915bd..efb5fa76 100644
--- a/mypy_stubs/hgvs/exceptions.pyi
+++ b/mypy_stubs/hgvs/exceptions.pyi
@@ -27,4 +27,3 @@ class HGVSUsageError(HGVSError):
     pass
 
 class HGVSVerifyFailedError(HGVSError):
     pass
-
diff --git a/mypy_stubs/hgvs/parser.pyi b/mypy_stubs/hgvs/parser.pyi
index 229ff555..f1ad7469 100644
--- a/mypy_stubs/hgvs/parser.pyi
+++ b/mypy_stubs/hgvs/parser.pyi
@@ -1,6 +1,7 @@
-import hgvs.sequencevariant
 from typing import Optional
 
+import hgvs.sequencevariant
+
 class Parser:
     def __init__(self, grammar_fn: Optional[str] = None, export_all_rules: bool = False) -> None: ...
     def parse(self, v: str) -> hgvs.sequencevariant.SequenceVariant: ...
diff --git a/mypy_stubs/hgvs/validator.pyi b/mypy_stubs/hgvs/validator.pyi
index 81e93fd8..6e7f7118 100644
--- a/mypy_stubs/hgvs/validator.pyi
+++ b/mypy_stubs/hgvs/validator.pyi
@@ -1,6 +1,6 @@
 from typing import Any, Optional
-import hgvs
 
+import hgvs
 
 class Validator:
     def __init__(self, hdp: Any, strict: bool = hgvs.global_config.validator.strict): ...
diff --git a/mypy_stubs/idutils/__init__.pyi b/mypy_stubs/idutils/__init__.pyi
index 7312073c..6005cad0 100644
--- a/mypy_stubs/idutils/__init__.pyi
+++ b/mypy_stubs/idutils/__init__.pyi
@@ -1,5 +1,5 @@
-from typing import Callable, Mapping, Sequence, Tuple, Union
 from re import Match, Pattern
+from typing import Callable, Mapping, Sequence, Tuple, Union
 
 ENSEMBL_PREFIXES: Sequence[str]
 ARRAYEXPRESS_CODES: Sequence[str]
@@ -79,11 +79,9 @@ def is_swh(val: str) -> Union[Match[str], None]: ...
 def is_ror(val: str) -> Union[Match[str], None]: ...
 def is_viaf(val: str) -> bool: ...
 
-
 PID_SCHEMES: Sequence[Tuple[str, Callable[[str], Union[Match[str], bool, None]]]]
 SCHEME_FILTER: Sequence[Tuple[str, list[str]]]
 
-
 def detect_identifier_schemes(val: str) -> list[str]: ...
 def normalize_doi(val: str) -> str: ...
 def normalize_handle(val: str) -> str: ...
@@ -102,6 +100,6 @@ def normalize_pid(val: str, scheme: str) -> str: ...
 
 LANDING_URLS: Mapping[str, str]
 
-def to_url(val: str, scheme: str, url_scheme: str = 'http') -> str: ...
+def to_url(val: str, scheme: str, url_scheme: str = "http") -> str: ...
 
 __version__: str
diff --git a/mypy_stubs/mavehgvs/__init__.py b/mypy_stubs/mavehgvs/__init__.py
index a25b6333..3679a3c1 100644
--- a/mypy_stubs/mavehgvs/__init__.py
+++ b/mypy_stubs/mavehgvs/__init__.py
@@ -1,3 +1,3 @@
-from .variant import Variant as Variant
-from .position import VariantPosition as VariantPosition
 from .exceptions import MaveHgvsParseError as MaveHgvsParseError
+from .position import VariantPosition as VariantPosition
+from .variant import Variant as Variant
diff --git a/mypy_stubs/mavehgvs/position.pyi b/mypy_stubs/mavehgvs/position.pyi
index 4c5f7731..ef423923 100644
--- a/mypy_stubs/mavehgvs/position.pyi
+++ b/mypy_stubs/mavehgvs/position.pyi
@@ -1,19 +1,10 @@
 class VariantPosition:
-
     def __repr__(self) -> str: ...
-
-    def __lt__(self, other : object) -> bool: ...
-
-    def __eq__(self, other : object) -> bool: ...
-
-    def __ne__(self, other : object) -> bool: ...
-
+    def __lt__(self, other: object) -> bool: ...
+    def __eq__(self, other: object) -> bool: ...
+    def __ne__(self, other: object) -> bool: ...
     def is_utr(self) -> bool: ...
-
     def is_intronic(self) -> bool: ...
-
     def is_protein(self) -> bool: ...
-
     def is_extended(self) -> bool: ...
-
-    def is_adjacent(self, other : object) -> bool: ...
+    def is_adjacent(self, other: object) -> bool: ...
diff --git a/mypy_stubs/mavehgvs/variant.pyi b/mypy_stubs/mavehgvs/variant.pyi
index 6f364265..35086b3d 100644
--- a/mypy_stubs/mavehgvs/variant.pyi
+++ b/mypy_stubs/mavehgvs/variant.pyi
@@ -1,14 +1,13 @@
-from typing import Optional, Union, Tuple, List, Sequence, Mapping, Any
-from .position import VariantPosition
+from typing import Any, List, Mapping, Optional, Sequence, Tuple, Union
 
+from .position import VariantPosition
 
 class Variant:
-
     def __init__(
         self,
         s: Union[str, Mapping[str, Any], Sequence[Mapping[str, Any]]],
         targetseq: Optional[str] = None,
-        relaxed_ordering: bool = False
+        relaxed_ordering: bool = False,
     ): ...
 
     positions: Optional[
@@ -21,9 +20,4 @@ class Variant:
 
     prefix: str
 
-    sequence: Union[
-        str,
-        Tuple[str, str],
-        List[Optional[Union[str, Tuple[str, str]]]],
-        None
-    ]
+    sequence: Union[str, Tuple[str, str], List[Optional[Union[str, Tuple[str, str]]]], None]
diff --git a/src/mavedb/data_providers/services.py b/src/mavedb/data_providers/services.py
index 74e7bf45..3d16a8e5 100644
--- a/src/mavedb/data_providers/services.py
+++ b/src/mavedb/data_providers/services.py
@@ -1,8 +1,8 @@
 import os
-import requests
 from datetime import date
 from typing import Optional, TypedDict
 
+import requests
 from cdot.hgvs.dataproviders import ChainedSeqFetcher, FastaSeqFetcher, RESTDataProvider
 
 GENOMIC_FASTA_FILES = [
diff --git a/src/mavedb/db/base.py b/src/mavedb/db/base.py
index 2dfb2325..15da6f09 100644
--- a/src/mavedb/db/base.py
+++ b/src/mavedb/db/base.py
@@ -12,4 +12,4 @@ class Base:
 
     # Generate __tablename__ automatically
     # Declared in this odd way to provide correct type hint for mypy
-    __tablename__ : Union[declared_attr[Any], str] = declared_attr(lambda cls: cls.__name__.lower())
+    __tablename__: Union[declared_attr[Any], str] = declared_attr(lambda cls: cls.__name__.lower())
diff --git a/src/mavedb/deps.py b/src/mavedb/deps.py
index 0ab7a8ad..9b4e278a 100644
--- a/src/mavedb/deps.py
+++ b/src/mavedb/deps.py
@@ -2,13 +2,13 @@
 
 from typing import Any, AsyncGenerator, Generator
 
-from arq import create_pool, ArqRedis
+from arq import ArqRedis, create_pool
 from cdot.hgvs.dataproviders import RESTDataProvider
 from sqlalchemy.orm import Session
 
+from mavedb.data_providers.services import cdot_rest
 from mavedb.db.session import SessionLocal
 from mavedb.worker.settings import RedisWorkerSettings
-from mavedb.data_providers.services import cdot_rest
 
 
 def get_db() -> Generator[Session, Any, None]:
diff --git a/src/mavedb/lib/authentication.py b/src/mavedb/lib/authentication.py
index b3a3a617..b82faf3b 100644
--- a/src/mavedb/lib/authentication.py
+++ b/src/mavedb/lib/authentication.py
@@ -1,11 +1,11 @@
+import logging
+import os
 from dataclasses import dataclass
 from datetime import datetime
 from enum import Enum
-import logging
-import os
 from typing import Optional
 
-from fastapi import Depends, Request, HTTPException, Security, Header
+from fastapi import Depends, Header, HTTPException, Request, Security
 from fastapi.security import (
     APIKeyCookie,
     APIKeyHeader,
@@ -17,10 +17,10 @@
 from sqlalchemy.orm import Session
 
 from mavedb import deps
-from mavedb.models.enums.user_role import UserRole
+from mavedb.lib.logging.context import format_raised_exception_info_as_dict, logging_context, save_to_logging_context
 from mavedb.lib.orcid import fetch_orcid_user_email
-from mavedb.lib.logging.context import logging_context, save_to_logging_context, format_raised_exception_info_as_dict
 from mavedb.models.access_key import AccessKey
+from mavedb.models.enums.user_role import UserRole
 from mavedb.models.user import User
 
 ORCID_JWT_SIGNING_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
diff --git a/src/mavedb/lib/authorization.py b/src/mavedb/lib/authorization.py
index 7bd9a678..9b30cb86 100644
--- a/src/mavedb/lib/authorization.py
+++ b/src/mavedb/lib/authorization.py
@@ -4,11 +4,10 @@
 
 from fastapi import Depends, HTTPException
 from starlette import status
 
-from mavedb.lib.authentication import get_current_user, UserData
+from mavedb.lib.authentication import UserData, get_current_user
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
 from mavedb.models.enums.user_role import UserRole
 
-
 logger = logging.getLogger(__name__)
diff --git a/src/mavedb/lib/contributors.py b/src/mavedb/lib/contributors.py
index 4e5eecae..4edfce97 100644
--- a/src/mavedb/lib/contributors.py
+++ b/src/mavedb/lib/contributors.py
@@ -1,10 +1,11 @@
 import logging
+
 from sqlalchemy.orm import Session
 
 from mavedb.lib import orcid
 from mavedb.lib.exceptions import NonexistentOrcidUserError
-from mavedb.models.contributor import Contributor
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
+from mavedb.models.contributor import Contributor
 
 logger = logging.getLogger(__name__)
diff --git a/src/mavedb/lib/experiments.py b/src/mavedb/lib/experiments.py
index a71e945b..fcdb3814 100644
--- a/src/mavedb/lib/experiments.py
+++ b/src/mavedb/lib/experiments.py
@@ -4,20 +4,22 @@
 from sqlalchemy import func, or_
 from sqlalchemy.orm import Session
 
+from mavedb.lib.logging.context import logging_context, save_to_logging_context
 from mavedb.models.contributor import Contributor
-from mavedb.lib.logging.context import save_to_logging_context, logging_context
+from mavedb.models.controlled_keyword import ControlledKeyword
 from mavedb.models.experiment import Experiment
+from mavedb.models.experiment_controlled_keyword import ExperimentControlledKeywordAssociation
+from mavedb.models.publication_identifier import PublicationIdentifier
 from mavedb.models.score_set import ScoreSet
 from mavedb.models.user import User
 from mavedb.view_models.search import ExperimentsSearch
-from mavedb.models.publication_identifier import PublicationIdentifier
-from mavedb.models.controlled_keyword import ControlledKeyword
-from mavedb.models.experiment_controlled_keyword import ExperimentControlledKeywordAssociation
 
 logger = logging.getLogger(__name__)
 
 
-def search_experiments(db: Session, owner_or_contributor: Optional[User], search: ExperimentsSearch) -> list[Experiment]:
+def search_experiments(
+    db: Session, owner_or_contributor: Optional[User], search: ExperimentsSearch
+) -> list[Experiment]:
     save_to_logging_context({"experiment_search_criteria": search.dict()})
 
     query = db.query(Experiment)
diff --git a/src/mavedb/lib/identifiers.py b/src/mavedb/lib/identifiers.py
index d44b02c3..91410cd4 100644
--- a/src/mavedb/lib/identifiers.py
+++ b/src/mavedb/lib/identifiers.py
@@ -1,17 +1,17 @@
 import os
-from typing import Optional, Union, Mapping
+from typing import Mapping, Optional, Union
 
 import eutils  # type: ignore
-from idutils import is_doi, normalize_doi
 from eutils import EutilsNCBIError  # type: ignore
 from eutils._internal.xmlfacades.pubmedarticle import PubmedArticle  # type: ignore
 from eutils._internal.xmlfacades.pubmedarticleset import PubmedArticleSet  # type: ignore
+from idutils import is_doi, normalize_doi
 from sqlalchemy import select
 from sqlalchemy.orm import Session
 
 from mavedb.lib.exceptions import AmbiguousIdentifierError, NonexistentIdentifierError
-from mavedb.lib.external_publications import Rxiv, Crossref, CrossrefWork, RxivContentDetail, PublicationAuthors
-from mavedb.lib.validation.publication import identifier_valid_for, validate_db_name, infer_identifier_from_url
+from mavedb.lib.external_publications import Crossref, CrossrefWork, PublicationAuthors, Rxiv, RxivContentDetail
+from mavedb.lib.validation.publication import identifier_valid_for, infer_identifier_from_url, validate_db_name
 from mavedb.models.doi_identifier import DoiIdentifier
 from mavedb.models.ensembl_identifier import EnsemblIdentifier
 from mavedb.models.ensembl_offset import EnsemblOffset
diff --git a/src/mavedb/lib/keywords.py b/src/mavedb/lib/keywords.py
index 53e1bf53..7cbfadc9 100644
--- a/src/mavedb/lib/keywords.py
+++ b/src/mavedb/lib/keywords.py
@@ -1,7 +1,8 @@
-from sqlalchemy.orm import Session
-from sqlalchemy import func
 from typing import Optional
 
+from sqlalchemy import func
+from sqlalchemy.orm import Session
+
 from mavedb.models.controlled_keyword import ControlledKeyword
 
 
@@ -16,8 +17,5 @@ def search_keyword(db: Session, key: str, value: Optional[str]):
     controlled_keyword = query.one_or_none()
 
     if controlled_keyword is None:
-        raise ValueError(f'Invalid keyword {key} or {value}')
+        raise ValueError(f"Invalid keyword {key} or {value}")
     return controlled_keyword
-
-
-
diff --git a/src/mavedb/lib/logging/canonical.py b/src/mavedb/lib/logging/canonical.py
index 6d639a94..430d1f91 100644
--- a/src/mavedb/lib/logging/canonical.py
+++ b/src/mavedb/lib/logging/canonical.py
@@ -7,9 +7,8 @@
 from starlette.responses import Response
 
 from mavedb import __version__
-from mavedb.lib.logging.models import Source, LogType
-from mavedb.lib.logging.context import save_to_logging_context, logging_context
-
+from mavedb.lib.logging.context import logging_context, save_to_logging_context
+from mavedb.lib.logging.models import LogType, Source
 
 logger = logging.getLogger(__name__)
diff --git a/src/mavedb/lib/logging/context.py b/src/mavedb/lib/logging/context.py
index 5dc37fb5..6771f760 100644
--- a/src/mavedb/lib/logging/context.py
+++ b/src/mavedb/lib/logging/context.py
@@ -1,20 +1,17 @@
 import logging
-import time
 import os
 import sys
+import time
 import traceback
+from typing import Any, Optional, Union
 
-from typing import Any, Union, Optional
-
-
-from starlette.requests import Request, HTTPConnection
-from starlette_context.middleware import RawContextMiddleware
+from starlette.requests import HTTPConnection, Request
 from starlette_context import context
+from starlette_context.middleware import RawContextMiddleware
 
 from mavedb import __project__, __version__
 from mavedb.lib.logging.models import Source
 
-
 FRONTEND_URL = os.getenv("FRONTEND_URL", "")
 API_URL = os.getenv("API_URL", "")
diff --git a/src/mavedb/lib/logging/logged_route.py b/src/mavedb/lib/logging/logged_route.py
index 83112420..b99f59aa 100644
--- a/src/mavedb/lib/logging/logged_route.py
+++ b/src/mavedb/lib/logging/logged_route.py
@@ -1,9 +1,9 @@
-from fastapi import Response, Request
-from fastapi.routing import APIRoute
-from starlette.background import BackgroundTask, BackgroundTasks
+import time
 from typing import Callable
 
-import time
+from fastapi import Request, Response
+from fastapi.routing import APIRoute
+from starlette.background import BackgroundTask, BackgroundTasks
 
 from mavedb.lib.logging.canonical import log_request
 from mavedb.lib.logging.context import save_to_logging_context
diff --git a/src/mavedb/lib/mave/hgvs.py b/src/mavedb/lib/mave/hgvs.py
index e27be8fe..36878de9 100644
--- a/src/mavedb/lib/mave/hgvs.py
+++ b/src/mavedb/lib/mave/hgvs.py
@@ -1,12 +1,12 @@
 from functools import partial
 from typing import Optional, Union
 
-from mavedb.lib.exceptions import ValidationError
+from mavehgvs import MaveHgvsParseError, Variant
 
-from mavehgvs import Variant, MaveHgvsParseError
+from mavedb.lib.exceptions import ValidationError
 
-from .utils import is_csv_null
 from .constants import HGVS_NT_COLUMN, HGVS_PRO_COLUMN, HGVS_SPLICE_COLUMN
+from .utils import is_csv_null
 
 
 def validate_hgvs_string(
diff --git a/src/mavedb/lib/mave/variant.py b/src/mavedb/lib/mave/variant.py
index 559d6248..40d21d38 100644
--- a/src/mavedb/lib/mave/variant.py
+++ b/src/mavedb/lib/mave/variant.py
@@ -3,9 +3,9 @@
 from mavedb.lib.exceptions import ValidationError
 from mavedb.lib.mave.constants import (
-    VARIANT_SCORE_DATA,
-    VARIANT_COUNT_DATA,
     REQUIRED_SCORE_COLUMN,
+    VARIANT_COUNT_DATA,
+    VARIANT_SCORE_DATA,
 )
diff --git a/src/mavedb/lib/orcid.py b/src/mavedb/lib/orcid.py
index 7b713e17..25a0ecc3 100644
--- a/src/mavedb/lib/orcid.py
+++ b/src/mavedb/lib/orcid.py
@@ -4,8 +4,8 @@
 
 import orcid  # type: ignore
 
-from mavedb.view_models.orcid import OrcidUser
 from mavedb.lib.logging.context import logging_context, save_to_logging_context
+from mavedb.view_models.orcid import OrcidUser
 
 logger = logging.getLogger(__name__)
diff --git a/src/mavedb/lib/permissions.py b/src/mavedb/lib/permissions.py
index a59b1138..0a3af714 100644
--- a/src/mavedb/lib/permissions.py
+++ b/src/mavedb/lib/permissions.py
@@ -2,9 +2,9 @@
 from enum import Enum
 from typing import Optional
 
-from mavedb.lib.authentication import UserData
-from mavedb.lib.logging.context import save_to_logging_context, logging_context
 from mavedb.db.base import Base
+from mavedb.lib.authentication import UserData
+from mavedb.lib.logging.context import logging_context, save_to_logging_context
 from mavedb.models.enums.user_role import UserRole
 from mavedb.models.experiment import Experiment
 from mavedb.models.experiment_set import ExperimentSet
@@ -15,14 +15,14 @@
 
 
 class Action(Enum):
-    READ = 'read'
-    UPDATE = 'update'
-    DELETE = 'delete'
-    ADD_EXPERIMENT = 'add_experiment'
-    ADD_SCORE_SET = 'add_score_set'
-    SET_SCORES = 'set_scores'
-    ADD_ROLE = 'add_role'
-    PUBLISH = 'publish'
+    READ = "read"
+    UPDATE = "update"
+    DELETE = "delete"
+    ADD_EXPERIMENT = "add_experiment"
+    ADD_SCORE_SET = "add_score_set"
+    SET_SCORES = "set_scores"
+    ADD_ROLE = "add_role"
+    PUBLISH = "publish"
 
 
 class PermissionResponse:
diff --git a/src/mavedb/lib/score_sets.py b/src/mavedb/lib/score_sets.py
index 1729d058..8384ecbf 100644
--- a/src/mavedb/lib/score_sets.py
+++ b/src/mavedb/lib/score_sets.py
@@ -1,16 +1,17 @@
 import csv
 import io
-import re
 import logging
+import re
 from typing import Any, BinaryIO, Iterable, Optional, Sequence
 
 import numpy as np
 import pandas as pd
 from pandas.testing import assert_index_equal
-from sqlalchemy import cast, func, Integer, or_, select
-from sqlalchemy.orm import aliased, contains_eager, joinedload, selectinload, Session
+from sqlalchemy import Integer, cast, func, or_, select
+from sqlalchemy.orm import Session, aliased, contains_eager, joinedload, selectinload
 
 from mavedb.lib.exceptions import ValidationError
+from mavedb.lib.logging.context import logging_context, save_to_logging_context
 from mavedb.lib.mave.constants import (
     HGVS_NT_COLUMN,
     HGVS_PRO_COLUMN,
@@ -18,31 +19,30 @@
     VARIANT_COUNT_DATA,
     VARIANT_SCORE_DATA,
 )
-from mavedb.lib.validation.constants.general import null_values_list
-from mavedb.lib.logging.context import save_to_logging_context, logging_context
 from mavedb.lib.mave.utils import is_csv_null
+from mavedb.lib.validation.constants.general import null_values_list
 from mavedb.models.contributor import Contributor
 from mavedb.models.controlled_keyword import ControlledKeyword
 from mavedb.models.doi_identifier import DoiIdentifier
-from mavedb.models.ensembl_offset import EnsemblOffset
 from mavedb.models.ensembl_identifier import EnsemblIdentifier
+from mavedb.models.ensembl_offset import EnsemblOffset
 from mavedb.models.experiment import Experiment
 from mavedb.models.experiment_controlled_keyword import
ExperimentControlledKeywordAssociation from mavedb.models.experiment_publication_identifier import ExperimentPublicationIdentifierAssociation from mavedb.models.experiment_set import ExperimentSet from mavedb.models.publication_identifier import PublicationIdentifier +from mavedb.models.refseq_identifier import RefseqIdentifier +from mavedb.models.refseq_offset import RefseqOffset +from mavedb.models.score_set import ScoreSet from mavedb.models.score_set_publication_identifier import ( ScoreSetPublicationIdentifierAssociation, ) -from mavedb.models.refseq_offset import RefseqOffset -from mavedb.models.refseq_identifier import RefseqIdentifier -from mavedb.models.score_set import ScoreSet from mavedb.models.target_accession import TargetAccession from mavedb.models.target_gene import TargetGene from mavedb.models.target_sequence import TargetSequence -from mavedb.models.uniprot_offset import UniprotOffset from mavedb.models.taxonomy import Taxonomy from mavedb.models.uniprot_identifier import UniprotIdentifier +from mavedb.models.uniprot_offset import UniprotOffset from mavedb.models.user import User from mavedb.models.variant import Variant from mavedb.view_models.search import ScoreSetsSearch @@ -479,8 +479,7 @@ def arrays_equal(array1: np.ndarray, array2: np.ndarray): return array1.shape == array2.shape and all( # note that each of the three expressions here is a boolean ndarray # so combining them with bitwise `&` and `|` works: - (pd.isnull(array1) & pd.isnull(array2)) - | (array1 == array2) + (pd.isnull(array1) & pd.isnull(array2)) | (array1 == array2) ) diff --git a/src/mavedb/lib/script_environment.py b/src/mavedb/lib/script_environment.py index 7761775c..d81e909d 100644 --- a/src/mavedb/lib/script_environment.py +++ b/src/mavedb/lib/script_environment.py @@ -2,7 +2,7 @@ Environment setup for scripts. """ -from sqlalchemy.orm import configure_mappers, Session +from sqlalchemy.orm import Session, configure_mappers from mavedb import deps from mavedb.models import * # noqa: F403 diff --git a/src/mavedb/lib/slack.py b/src/mavedb/lib/slack.py index a7979296..22786b69 100644 --- a/src/mavedb/lib/slack.py +++ b/src/mavedb/lib/slack.py @@ -1,5 +1,5 @@ -import os import json +import os import sys import traceback diff --git a/src/mavedb/lib/taxonomies.py b/src/mavedb/lib/taxonomies.py index e33ec802..f25cf328 100644 --- a/src/mavedb/lib/taxonomies.py +++ b/src/mavedb/lib/taxonomies.py @@ -1,7 +1,8 @@ +from typing import Any + import httpx from fastapi.exceptions import HTTPException from sqlalchemy.orm import Session -from typing import Any from mavedb.models.taxonomy import Taxonomy from mavedb.view_models.taxonomy import TaxonomyCreate diff --git a/src/mavedb/lib/urns.py b/src/mavedb/lib/urns.py index 81bef8ad..534264cf 100644 --- a/src/mavedb/lib/urns.py +++ b/src/mavedb/lib/urns.py @@ -1,3 +1,4 @@ +import logging import re import string @@ -8,8 +9,6 @@ from mavedb.models.experiment_set import ExperimentSet from mavedb.models.score_set import ScoreSet -import logging - logger = logging.getLogger(__name__) @@ -28,9 +27,7 @@ def generate_experiment_set_urn(db: Session): # TODO We can't use func.max if an experiment set URN's numeric part will ever have anything other than 8 digits, # because we rely on the order guaranteed by zero-padding. This assumption is valid until we have 99999999 # experiment sets. 
- row = ( - db.query(func.max(ExperimentSet.urn)).filter(ExperimentSet.urn.op("~")("^urn:mavedb:[0-9]+$")).one_or_none() - ) + row = db.query(func.max(ExperimentSet.urn)).filter(ExperimentSet.urn.op("~")("^urn:mavedb:[0-9]+$")).one_or_none() max_urn_number = 0 if row and row[0]: max_urn = row[0] diff --git a/src/mavedb/lib/validation/dataframe.py b/src/mavedb/lib/validation/dataframe.py index c4a90b1b..2d7bdffc 100644 --- a/src/mavedb/lib/validation/dataframe.py +++ b/src/mavedb/lib/validation/dataframe.py @@ -3,10 +3,9 @@ import hgvs.exceptions import hgvs.parser import hgvs.validator -from cdot.hgvs.dataproviders import RESTDataProvider import numpy as np import pandas as pd - +from cdot.hgvs.dataproviders import RESTDataProvider from fqfa.util.translate import translate_dna from mavehgvs.exceptions import MaveHgvsParseError from mavehgvs.variant import Variant diff --git a/src/mavedb/lib/validation/identifier.py b/src/mavedb/lib/validation/identifier.py index 652b178b..e6e2dcde 100644 --- a/src/mavedb/lib/validation/identifier.py +++ b/src/mavedb/lib/validation/identifier.py @@ -1,8 +1,8 @@ import idutils +from mavedb.lib.validation.constants.identifier import valid_dbnames from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.utilities import is_null -from mavedb.lib.validation.constants.identifier import valid_dbnames def validate_db_name(db_name: str): diff --git a/src/mavedb/lib/validation/keywords.py b/src/mavedb/lib/validation/keywords.py index df90a8b5..523a8d50 100644 --- a/src/mavedb/lib/validation/keywords.py +++ b/src/mavedb/lib/validation/keywords.py @@ -8,8 +8,7 @@ def validate_description(value: str, key: str, description: Optional[str]): if value.lower() == "other" and (description is None or description.strip() == ""): raise ValidationError( - "Other option does not allow empty description.", - custom_loc=["body", "keywordDescriptions", key] + "Other option does not allow empty description.", custom_loc=["body", "keywordDescriptions", key] ) @@ -44,9 +43,7 @@ def validate_keyword(keyword: str): If keyword is invalid or null. """ if is_null(keyword) or not isinstance(keyword, str): - raise ValidationError( - "{} are not valid keyword. Keyword must be a non null list of strings.".format(keyword) - ) + raise ValidationError("{} are not valid keyword. 
Keyword must be a non null list of strings.".format(keyword)) def validate_keyword_keys(keywords: list): @@ -54,8 +51,10 @@ def validate_keyword_keys(keywords: list): variant_library_method = keyword_dict.get("variant library creation method", "") if variant_library_method == "endogenous locus library method": - if not ("endogenous locus library method system" in keyword_dict and - "endogenous locus library method mechanism" in keyword_dict): + if not ( + "endogenous locus library method system" in keyword_dict + and "endogenous locus library method mechanism" in keyword_dict + ): raise ValidationError( "If 'Variant Library Creation Method' is 'Endogenous locus library method', " "both 'Endogenous Locus Library Method System' and 'Endogenous Locus Library Method Mechanism' " @@ -63,8 +62,10 @@ def validate_keyword_keys(keywords: list): ) elif variant_library_method == "in vitro construct library method": - if not ("in vitro construct library method system" in keyword_dict and - "in vitro construct library method mechanism" in keyword_dict): + if not ( + "in vitro construct library method system" in keyword_dict + and "in vitro construct library method mechanism" in keyword_dict + ): raise ValidationError( "If 'Variant Library Creation Method' is 'In vitro construct library method', " "both 'In Vitro Construct Library Method System' and 'In Vitro Construct Library Method Mechanism' " @@ -72,12 +73,15 @@ def validate_keyword_keys(keywords: list): ) elif variant_library_method == "other": - if any(k in keyword_dict for k in [ - "endogenous locus library method system", - "endogenous locus library method mechanism", - "in vitro construct library method system", - "in vitro construct library method mechanism" - ]): + if any( + k in keyword_dict + for k in [ + "endogenous locus library method system", + "endogenous locus library method mechanism", + "in vitro construct library method system", + "in vitro construct library method mechanism", + ] + ): raise ValidationError( "If 'Variant Library Creation Method' is 'Other', none of " "'Endogenous Locus Library Method System', 'Endogenous Locus Library Method Mechanism', " diff --git a/src/mavedb/lib/validation/publication.py b/src/mavedb/lib/validation/publication.py index dab399c1..f440f5ee 100644 --- a/src/mavedb/lib/validation/publication.py +++ b/src/mavedb/lib/validation/publication.py @@ -1,10 +1,10 @@ -import idutils import datetime - from urllib.parse import urlparse -from mavedb.lib.validation.exceptions import ValidationError +import idutils + from mavedb.lib.validation.constants.publication import valid_dbnames +from mavedb.lib.validation.exceptions import ValidationError def validate_db_name(db_name: str): diff --git a/src/mavedb/lib/validation/target.py b/src/mavedb/lib/validation/target.py index 7f9d968a..f22121ac 100644 --- a/src/mavedb/lib/validation/target.py +++ b/src/mavedb/lib/validation/target.py @@ -1,8 +1,9 @@ -from mavedb.lib.validation.exceptions import ValidationError -from mavedb.lib.validation.constants.target import valid_categories, valid_sequence_types from fqfa import infer_sequence_type from fqfa.validator import amino_acids_validator, dna_bases_validator +from mavedb.lib.validation.constants.target import valid_categories, valid_sequence_types +from mavedb.lib.validation.exceptions import ValidationError + def validate_target_category(category: str): """ diff --git a/src/mavedb/lib/validation/utilities.py b/src/mavedb/lib/validation/utilities.py index b7a97bbd..2712c886 100644 --- 
a/src/mavedb/lib/validation/utilities.py +++ b/src/mavedb/lib/validation/utilities.py @@ -2,11 +2,11 @@ from random import choice from typing import Optional, SupportsIndex, Union -from mavedb.lib.validation.constants.conversion import codon_dict_DNA -from mavedb.lib.validation.constants.conversion import aa_dict_key_1 -from mavedb.lib.validation.constants.general import null_values_re from mavehgvs.variant import Variant +from mavedb.lib.validation.constants.conversion import aa_dict_key_1, codon_dict_DNA +from mavedb.lib.validation.constants.general import null_values_re + def is_null(value): """ @@ -89,13 +89,15 @@ def construct_hgvs_pro(wt: str, mutant: str, position: int, target_seq: Optional # check that the provided 3 letter amino acid codes are valid if wt not in aa_dict_key_1.values(): raise ValueError( - "wt 3 letter amino acid code {} is invalid, " - "must be one of the following: {}".format(wt, list(aa_dict_key_1.values())) + "wt 3 letter amino acid code {} is invalid, " "must be one of the following: {}".format( + wt, list(aa_dict_key_1.values()) + ) ) if mutant not in aa_dict_key_1.values(): raise ValueError( - "wt 3 letter amino acid code {} is invalid, " - "must be one of the following: {}".format(mutant, list(aa_dict_key_1.values())) + "wt 3 letter amino acid code {} is invalid, " "must be one of the following: {}".format( + mutant, list(aa_dict_key_1.values()) + ) ) if wt == mutant: diff --git a/src/mavedb/lib/validation/variant.py b/src/mavedb/lib/validation/variant.py index abd0fc43..2d0f5bcf 100644 --- a/src/mavedb/lib/validation/variant.py +++ b/src/mavedb/lib/validation/variant.py @@ -1,12 +1,13 @@ from typing import Optional, Union -from mavehgvs import Variant, MaveHgvsParseError -from mavedb.lib.validation.exceptions import ValidationError +from mavehgvs import MaveHgvsParseError, Variant + from mavedb.lib.validation.constants.general import ( hgvs_nt_column, - hgvs_splice_column, hgvs_pro_column, + hgvs_splice_column, ) +from mavedb.lib.validation.exceptions import ValidationError __all__ = ["validate_hgvs_string"] diff --git a/src/mavedb/logging/config.py b/src/mavedb/logging/config.py index 6ccab6ce..a487d5b5 100644 --- a/src/mavedb/logging/config.py +++ b/src/mavedb/logging/config.py @@ -1,4 +1,5 @@ import os + import yaml from pkg_resources import resource_stream diff --git a/src/mavedb/models/access_key.py b/src/mavedb/models/access_key.py index f6590858..046a7e34 100644 --- a/src/mavedb/models/access_key.py +++ b/src/mavedb/models/access_key.py @@ -1,13 +1,12 @@ from datetime import datetime from typing import TYPE_CHECKING, Optional -from sqlalchemy import Column, Date, DateTime, Integer, String, ForeignKey -from sqlalchemy.orm import relationship, Mapped +from sqlalchemy import Column, Date, DateTime, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, relationship from mavedb.db.base import Base -from mavedb.models.user import User from mavedb.models.role import Role - +from mavedb.models.user import User if TYPE_CHECKING: from mavedb.lib.authorization import UserRole diff --git a/src/mavedb/models/controlled_keyword.py b/src/mavedb/models/controlled_keyword.py index 033aa474..4d5b86fd 100644 --- a/src/mavedb/models/controlled_keyword.py +++ b/src/mavedb/models/controlled_keyword.py @@ -6,7 +6,7 @@ class ControlledKeyword(Base): - __tablename__ = 'controlled_keywords' + __tablename__ = "controlled_keywords" id = Column(Integer, primary_key=True, index=True) key = Column(String, nullable=False) @@ -16,4 +16,4 @@ class ControlledKeyword(Base): 
description = Column(String, nullable=True) creation_date = Column(Date, nullable=False, default=date.today) modification_date = Column(Date, nullable=False, default=date.today, onupdate=date.today) - __table_args__ = (UniqueConstraint('key', 'value', name='ix_controlled_keywords_key_value'), ) + __table_args__ = (UniqueConstraint("key", "value", name="ix_controlled_keywords_key_value"),) diff --git a/src/mavedb/models/ensembl_offset.py b/src/mavedb/models/ensembl_offset.py index f8405905..28bb09a4 100644 --- a/src/mavedb/models/ensembl_offset.py +++ b/src/mavedb/models/ensembl_offset.py @@ -1,5 +1,5 @@ -from sqlalchemy import Column, Integer, ForeignKey -from sqlalchemy.orm import relationship, backref, Mapped +from sqlalchemy import Column, ForeignKey, Integer +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base from mavedb.models.ensembl_identifier import EnsemblIdentifier diff --git a/src/mavedb/models/experiment.py b/src/mavedb/models/experiment.py index 362c492a..bfd53f8d 100644 --- a/src/mavedb/models/experiment.py +++ b/src/mavedb/models/experiment.py @@ -1,26 +1,25 @@ from datetime import date - -from typing import Optional, List, TYPE_CHECKING +from typing import TYPE_CHECKING, List, Optional from sqlalchemy import Boolean, Column, Date, ForeignKey, Integer, String +from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.event import listens_for -from sqlalchemy.ext.associationproxy import association_proxy, AssociationProxy -from sqlalchemy.orm import relationship, Mapped +from sqlalchemy.ext.associationproxy import AssociationProxy, association_proxy +from sqlalchemy.orm import Mapped, relationship from sqlalchemy.schema import Table -from sqlalchemy.dialects.postgresql import JSONB from mavedb.db.base import Base from mavedb.lib.temp_urns import generate_temp_urn from mavedb.models.contributor import Contributor +from mavedb.models.controlled_keyword import ControlledKeyword +from mavedb.models.doi_identifier import DoiIdentifier from mavedb.models.experiment_controlled_keyword import ExperimentControlledKeywordAssociation +from mavedb.models.experiment_publication_identifier import ExperimentPublicationIdentifierAssociation from mavedb.models.experiment_set import ExperimentSet -from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.models.legacy_keyword import LegacyKeyword -from mavedb.models.doi_identifier import DoiIdentifier +from mavedb.models.publication_identifier import PublicationIdentifier from mavedb.models.raw_read_identifier import RawReadIdentifier -from mavedb.models.experiment_publication_identifier import ExperimentPublicationIdentifierAssociation from mavedb.models.user import User -from mavedb.models.publication_identifier import PublicationIdentifier if TYPE_CHECKING: from mavedb.models.score_set import ScoreSet @@ -92,7 +91,8 @@ class Experiment(Base): back_populates="experiment", cascade="all, delete-orphan" ) legacy_keyword_objs: Mapped[list[LegacyKeyword]] = relationship( - "LegacyKeyword", secondary=experiments_legacy_keywords_association_table, backref="experiments") + "LegacyKeyword", secondary=experiments_legacy_keywords_association_table, backref="experiments" + ) doi_identifiers: Mapped[list[DoiIdentifier]] = relationship( "DoiIdentifier", secondary=experiments_doi_identifiers_association_table, backref="experiments" ) @@ -126,16 +126,18 @@ def keywords(self) -> list[dict]: keywords = [] for keyword_assoc in keyword_objs: controlled_keyword = keyword_assoc.controlled_keyword - 
keywords.append({ - "keyword": { - "key": controlled_keyword.key, - "value": controlled_keyword.value, - "vocabulary": controlled_keyword.vocabulary, - "special": controlled_keyword.special, - "description": controlled_keyword.description, - }, - "description": keyword_assoc.description - }) + keywords.append( + { + "keyword": { + "key": controlled_keyword.key, + "value": controlled_keyword.value, + "vocabulary": controlled_keyword.vocabulary, + "special": controlled_keyword.special, + "description": controlled_keyword.description, + }, + "description": keyword_assoc.description, + } + ) return keywords @property @@ -155,13 +157,11 @@ async def set_keywords(self, db, keywords: list): ExperimentControlledKeywordAssociation( experiment=self, controlled_keyword=await self._find_keyword( - db, - keyword_obj.keyword.key, - keyword_obj.keyword.value, - keyword_obj.keyword.vocabulary + db, keyword_obj.keyword.key, keyword_obj.keyword.value, keyword_obj.keyword.vocabulary ), description=keyword_obj.description, - ) for keyword_obj in keywords + ) + for keyword_obj in keywords ] else: self.keyword_objs = [] @@ -178,12 +178,14 @@ async def _find_or_create_legacy_keyword(self, db, keyword_text): return keyword_obj async def _find_keyword(self, db, key: str, value: str, vocabulary: Optional[str]): - query = db.query(ControlledKeyword).filter(ControlledKeyword.key == key).filter(ControlledKeyword.value == value) + query = ( + db.query(ControlledKeyword).filter(ControlledKeyword.key == key).filter(ControlledKeyword.value == value) + ) if vocabulary: query = query.filter(ControlledKeyword.vocabulary == vocabulary) controlled_keyword_obj = query.one_or_none() if controlled_keyword_obj is None: - raise ValueError(f'Unknown keyword {key}:{value}') + raise ValueError(f"Unknown keyword {key}:{value}") return controlled_keyword_obj diff --git a/src/mavedb/models/experiment_controlled_keyword.py b/src/mavedb/models/experiment_controlled_keyword.py index 67d79cf8..c2d7f68f 100644 --- a/src/mavedb/models/experiment_controlled_keyword.py +++ b/src/mavedb/models/experiment_controlled_keyword.py @@ -1,10 +1,11 @@ -from sqlalchemy import Column, Integer, ForeignKey, String -from sqlalchemy.orm import backref, relationship, Mapped from typing import TYPE_CHECKING -from mavedb.db.base import Base +from sqlalchemy import Column, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, backref, relationship +from mavedb.db.base import Base from mavedb.models.controlled_keyword import ControlledKeyword + if TYPE_CHECKING: from mavedb.models.experiment import Experiment @@ -12,12 +13,12 @@ class ExperimentControlledKeywordAssociation(Base): __tablename__ = "experiment_controlled_keywords" - controlled_keyword_id = Column(Integer, ForeignKey('controlled_keywords.id'), nullable=False, primary_key=True) + controlled_keyword_id = Column(Integer, ForeignKey("controlled_keywords.id"), nullable=False, primary_key=True) controlled_keyword: Mapped[ControlledKeyword] = relationship( - 'ControlledKeyword', backref=backref('experiment_controlled_keywords', uselist=True) + "ControlledKeyword", backref=backref("experiment_controlled_keywords", uselist=True) ) experiment_id = Column(Integer, ForeignKey("experiments.id"), nullable=False, primary_key=True) - experiment: Mapped['Experiment'] = relationship(back_populates="keyword_objs") + experiment: Mapped["Experiment"] = relationship(back_populates="keyword_objs") - description = Column(String, nullable=True) \ No newline at end of file + description = Column(String, nullable=True) 
diff --git a/src/mavedb/models/experiment_publication_identifier.py b/src/mavedb/models/experiment_publication_identifier.py index 71690749..4e41ad4e 100644 --- a/src/mavedb/models/experiment_publication_identifier.py +++ b/src/mavedb/models/experiment_publication_identifier.py @@ -1,11 +1,11 @@ -from sqlalchemy import Column, Integer, Boolean, ForeignKey -from sqlalchemy.orm import relationship, Mapped - -from mavedb.db.base import Base - # Prevent circular imports from typing import TYPE_CHECKING +from sqlalchemy import Boolean, Column, ForeignKey, Integer +from sqlalchemy.orm import Mapped, relationship + +from mavedb.db.base import Base + if TYPE_CHECKING: from mavedb.models.experiment import Experiment from mavedb.models.publication_identifier import PublicationIdentifier diff --git a/src/mavedb/models/experiment_set.py b/src/mavedb/models/experiment_set.py index f9133dd8..a413cf04 100644 --- a/src/mavedb/models/experiment_set.py +++ b/src/mavedb/models/experiment_set.py @@ -1,19 +1,20 @@ from datetime import date -from typing import List, TYPE_CHECKING +from typing import TYPE_CHECKING, List from sqlalchemy import Boolean, Column, Date, ForeignKey, Integer, String -from sqlalchemy.orm import relationship, Mapped -from sqlalchemy.schema import Table from sqlalchemy.dialects.postgresql import JSONB +from sqlalchemy.orm import Mapped, relationship +from sqlalchemy.schema import Table from mavedb.db.base import Base from mavedb.lib.temp_urns import generate_temp_urn + from .contributor import Contributor -from .user import User from .doi_identifier import DoiIdentifier from .legacy_keyword import LegacyKeyword from .publication_identifier import PublicationIdentifier from .raw_read_identifier import RawReadIdentifier +from .user import User if TYPE_CHECKING: from mavedb.models.experiment import Experiment diff --git a/src/mavedb/models/mapped_variant.py b/src/mavedb/models/mapped_variant.py index 50bba4f4..5a418b22 100644 --- a/src/mavedb/models/mapped_variant.py +++ b/src/mavedb/models/mapped_variant.py @@ -1,10 +1,11 @@ from datetime import date from sqlalchemy import Boolean, Column, Date, ForeignKey, Integer, String -from sqlalchemy.orm import relationship, backref, Mapped from sqlalchemy.dialects.postgresql import JSONB +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base + from .variant import Variant diff --git a/src/mavedb/models/refseq_offset.py b/src/mavedb/models/refseq_offset.py index 486addef..ba26b359 100644 --- a/src/mavedb/models/refseq_offset.py +++ b/src/mavedb/models/refseq_offset.py @@ -1,5 +1,5 @@ -from sqlalchemy import Column, Integer, ForeignKey -from sqlalchemy.orm import relationship, backref, Mapped +from sqlalchemy import Column, ForeignKey, Integer +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base from mavedb.models.refseq_identifier import RefseqIdentifier diff --git a/src/mavedb/models/role.py b/src/mavedb/models/role.py index deb162c7..de6b7d63 100644 --- a/src/mavedb/models/role.py +++ b/src/mavedb/models/role.py @@ -1,4 +1,4 @@ -from sqlalchemy import Column, Integer, Enum +from sqlalchemy import Column, Enum, Integer from mavedb.db.base import Base from mavedb.models.enums.user_role import UserRole diff --git a/src/mavedb/models/score_set.py b/src/mavedb/models/score_set.py index 44cd0802..40cfce21 100644 --- a/src/mavedb/models/score_set.py +++ b/src/mavedb/models/score_set.py @@ -1,28 +1,27 @@ from datetime import date +from typing import TYPE_CHECKING, List, Optional + from 
sqlalchemy import Boolean, Column, Date, Enum, ForeignKey, Integer, String -from sqlalchemy.orm import relationship, Mapped -from sqlalchemy.ext.associationproxy import association_proxy, AssociationProxy -from sqlalchemy.schema import Table from sqlalchemy.dialects.postgresql import JSONB +from sqlalchemy.ext.associationproxy import AssociationProxy, association_proxy +from sqlalchemy.orm import Mapped, relationship +from sqlalchemy.schema import Table -from typing import List, TYPE_CHECKING, Optional - +import mavedb.models.score_set_publication_identifier from mavedb.db.base import Base +from mavedb.models.contributor import Contributor +from mavedb.models.doi_identifier import DoiIdentifier from mavedb.models.enums.mapping_state import MappingState from mavedb.models.enums.processing_state import ProcessingState -import mavedb.models.score_set_publication_identifier - -from mavedb.models.contributor import Contributor from mavedb.models.experiment import Experiment -from mavedb.models.user import User -from mavedb.models.license import License from mavedb.models.legacy_keyword import LegacyKeyword -from mavedb.models.doi_identifier import DoiIdentifier +from mavedb.models.license import License from mavedb.models.publication_identifier import PublicationIdentifier +from mavedb.models.user import User if TYPE_CHECKING: - from mavedb.models.variant import Variant from mavedb.models.target_gene import TargetGene + from mavedb.models.variant import Variant # from .raw_read_identifier import SraIdentifier from mavedb.lib.temp_urns import generate_temp_urn diff --git a/src/mavedb/models/score_set_publication_identifier.py b/src/mavedb/models/score_set_publication_identifier.py index e82424d4..37e5bbed 100644 --- a/src/mavedb/models/score_set_publication_identifier.py +++ b/src/mavedb/models/score_set_publication_identifier.py @@ -1,14 +1,14 @@ -from sqlalchemy import Column, Integer, Boolean, ForeignKey -from sqlalchemy.orm import relationship, Mapped - -from mavedb.db.base import Base - # Prevent circular imports from typing import TYPE_CHECKING +from sqlalchemy import Boolean, Column, ForeignKey, Integer +from sqlalchemy.orm import Mapped, relationship + +from mavedb.db.base import Base + if TYPE_CHECKING: - from mavedb.models.score_set import ScoreSet from mavedb.models.publication_identifier import PublicationIdentifier + from mavedb.models.score_set import ScoreSet class ScoreSetPublicationIdentifierAssociation(Base): diff --git a/src/mavedb/models/target_gene.py b/src/mavedb/models/target_gene.py index 540c8a26..36b88f85 100644 --- a/src/mavedb/models/target_gene.py +++ b/src/mavedb/models/target_gene.py @@ -1,14 +1,14 @@ from datetime import date +from typing import TYPE_CHECKING + from sqlalchemy import Column, Date, ForeignKey, Integer, String from sqlalchemy.dialects.postgresql import JSONB -from sqlalchemy.orm import backref, relationship, Mapped - -from typing import TYPE_CHECKING +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base -from mavedb.models.target_sequence import TargetSequence -from mavedb.models.target_accession import TargetAccession from mavedb.models.score_set import ScoreSet +from mavedb.models.target_accession import TargetAccession +from mavedb.models.target_sequence import TargetSequence if TYPE_CHECKING: from mavedb.models.ensembl_offset import EnsemblOffset diff --git a/src/mavedb/models/target_sequence.py b/src/mavedb/models/target_sequence.py index 6182da03..763a28ee 100644 --- a/src/mavedb/models/target_sequence.py 
+++ b/src/mavedb/models/target_sequence.py @@ -1,12 +1,13 @@ from datetime import date -from typing import List, TYPE_CHECKING +from typing import TYPE_CHECKING, List -from sqlalchemy import Column, Date, Integer, String, ForeignKey -from sqlalchemy.orm import relationship, backref, Mapped -from .taxonomy import Taxonomy +from sqlalchemy import Column, Date, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base +from .taxonomy import Taxonomy + if TYPE_CHECKING: from mavedb.models.target_gene import TargetGene diff --git a/src/mavedb/models/taxonomy.py b/src/mavedb/models/taxonomy.py index d78beeed..cffd7c02 100644 --- a/src/mavedb/models/taxonomy.py +++ b/src/mavedb/models/taxonomy.py @@ -1,9 +1,10 @@ from datetime import date -from sqlalchemy import Column, Date, ForeignKey, Integer, String, Boolean -from sqlalchemy.orm import relationship, Mapped +from sqlalchemy import Boolean, Column, Date, ForeignKey, Integer, String +from sqlalchemy.orm import Mapped, relationship from mavedb.db.base import Base + from .genome_identifier import GenomeIdentifier diff --git a/src/mavedb/models/uniprot_offset.py b/src/mavedb/models/uniprot_offset.py index e2bda72c..d97fdb2c 100644 --- a/src/mavedb/models/uniprot_offset.py +++ b/src/mavedb/models/uniprot_offset.py @@ -1,9 +1,9 @@ -from sqlalchemy import Column, Integer, ForeignKey -from sqlalchemy.orm import relationship, backref, Mapped +from sqlalchemy import Column, ForeignKey, Integer +from sqlalchemy.orm import Mapped, backref, relationship from mavedb.db.base import Base -from mavedb.models.uniprot_identifier import UniprotIdentifier from mavedb.models.target_gene import TargetGene +from mavedb.models.uniprot_identifier import UniprotIdentifier class UniprotOffset(Base): diff --git a/src/mavedb/models/user.py b/src/mavedb/models/user.py index 992ca32a..1cff0eff 100644 --- a/src/mavedb/models/user.py +++ b/src/mavedb/models/user.py @@ -1,9 +1,11 @@ -from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, Table -from sqlalchemy.orm import relationship, Mapped from typing import TYPE_CHECKING + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer, String, Table +from sqlalchemy.orm import Mapped, relationship + from mavedb.db.base import Base -from mavedb.models.role import Role from mavedb.models.enums.user_role import UserRole +from mavedb.models.role import Role users_roles_association_table = Table( "users_roles", diff --git a/src/mavedb/models/variant.py b/src/mavedb/models/variant.py index b2d1e87d..f3a5821a 100644 --- a/src/mavedb/models/variant.py +++ b/src/mavedb/models/variant.py @@ -1,10 +1,11 @@ from datetime import date from sqlalchemy import Column, Date, ForeignKey, Integer, String -from sqlalchemy.orm import relationship, Mapped from sqlalchemy.dialects.postgresql import JSONB +from sqlalchemy.orm import Mapped, relationship from mavedb.db.base import Base + from .score_set import ScoreSet diff --git a/src/mavedb/routers/access_keys.py b/src/mavedb/routers/access_keys.py index 683da46e..ce40529e 100644 --- a/src/mavedb/routers/access_keys.py +++ b/src/mavedb/routers/access_keys.py @@ -1,13 +1,13 @@ -import secrets import logging +import secrets from typing import Any from cryptography.hazmat.backends import default_backend as crypto_default_backend from cryptography.hazmat.primitives import serialization as crypto_serialization from cryptography.hazmat.primitives.asymmetric import rsa from fastapi import APIRouter, Depends -from 
fastapi.exceptions import HTTPException from fastapi.encoders import jsonable_encoder +from fastapi.exceptions import HTTPException from sqlalchemy.orm import Session from mavedb import deps diff --git a/src/mavedb/routers/api_information.py b/src/mavedb/routers/api_information.py index 6ca3f436..8ca8c3f3 100644 --- a/src/mavedb/routers/api_information.py +++ b/src/mavedb/routers/api_information.py @@ -14,7 +14,4 @@ def show_version() -> Any: Describe the API version. """ - return api_version.ApiVersion( - name=__project__, - version=__version__ - ) + return api_version.ApiVersion(name=__project__, version=__version__) diff --git a/src/mavedb/routers/controlled_keywords.py b/src/mavedb/routers/controlled_keywords.py index 79dcacf8..f600f6c6 100644 --- a/src/mavedb/routers/controlled_keywords.py +++ b/src/mavedb/routers/controlled_keywords.py @@ -1,10 +1,9 @@ from fastapi import APIRouter, Depends from fastapi.exceptions import HTTPException -from sqlalchemy.orm import Session from sqlalchemy import func +from sqlalchemy.orm import Session from mavedb import deps - from mavedb.lib.keywords import search_keyword as _search_keyword from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.view_models import keyword @@ -30,7 +29,12 @@ def fetch_keywords_by_key( Fetch keywords by category. """ lower_key = key.lower() - items = db.query(ControlledKeyword).filter(func.lower(ControlledKeyword.key) == lower_key).order_by(ControlledKeyword.value).all() + items = ( + db.query(ControlledKeyword) + .filter(func.lower(ControlledKeyword.key) == lower_key) + .order_by(ControlledKeyword.value) + .all() + ) if not items: raise HTTPException(status_code=404, detail=f"Controlled keywords with key {key} not found") return items diff --git a/src/mavedb/routers/experiment_sets.py b/src/mavedb/routers/experiment_sets.py index eeaaf34c..283b3188 100644 --- a/src/mavedb/routers/experiment_sets.py +++ b/src/mavedb/routers/experiment_sets.py @@ -1,15 +1,15 @@ import logging -from typing import Any from operator import attrgetter +from typing import Any from fastapi import APIRouter, Depends, HTTPException from sqlalchemy.orm import Session from mavedb import deps -from mavedb.lib.authentication import get_current_user, UserData -from mavedb.lib.permissions import has_permission, Action +from mavedb.lib.authentication import UserData, get_current_user from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context +from mavedb.lib.permissions import Action, has_permission from mavedb.models.experiment_set import ExperimentSet from mavedb.view_models import experiment_set diff --git a/src/mavedb/routers/experiments.py b/src/mavedb/routers/experiments.py index 6ca47430..cea7209e 100644 --- a/src/mavedb/routers/experiments.py +++ b/src/mavedb/routers/experiments.py @@ -2,13 +2,14 @@ from operator import attrgetter from typing import Any, Optional +import pydantic +import requests from fastapi import APIRouter, Depends, HTTPException from fastapi.encoders import jsonable_encoder -import pydantic from sqlalchemy.orm import Session from mavedb import deps -from mavedb.lib.authentication import get_current_user, UserData +from mavedb.lib.authentication import UserData, get_current_user from mavedb.lib.authorization import require_current_user, require_current_user_with_email from mavedb.lib.contributors import find_or_create_contributor from mavedb.lib.exceptions import NonexistentOrcidUserError @@ -18,12 +19,12 @@ 
find_or_create_publication_identifier, find_or_create_raw_read_identifier, ) +from mavedb.lib.keywords import search_keyword from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context -from mavedb.lib.permissions import assert_permission, has_permission, Action +from mavedb.lib.permissions import Action, assert_permission, has_permission from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.keywords import validate_keyword_list -from mavedb.lib.keywords import search_keyword from mavedb.models.contributor import Contributor from mavedb.models.experiment import Experiment from mavedb.models.experiment_controlled_keyword import ExperimentControlledKeywordAssociation @@ -32,8 +33,6 @@ from mavedb.view_models import experiment, score_set from mavedb.view_models.search import ExperimentsSearch -import requests - logger = logging.getLogger(__name__) router = APIRouter( diff --git a/src/mavedb/routers/hgvs.py b/src/mavedb/routers/hgvs.py index e0057cd1..57ceecad 100644 --- a/src/mavedb/routers/hgvs.py +++ b/src/mavedb/routers/hgvs.py @@ -1,5 +1,5 @@ -from typing import Any from itertools import chain +from typing import Any import hgvs.dataproviders.uta from cdot.hgvs.dataproviders import RESTDataProvider @@ -110,7 +110,7 @@ def transcript_info(transcript: str, hdp: RESTDataProvider = Depends(hgvs_data_p return transcript_info - + @router.get("/transcripts/protein/{transcript}", status_code=200, response_model=str) def convert_to_protein(transcript: str, hdp: RESTDataProvider = Depends(hgvs_data_provider)) -> str: """ diff --git a/src/mavedb/routers/mapped_variant.py b/src/mavedb/routers/mapped_variant.py index 8a87061b..05b9f6ad 100644 --- a/src/mavedb/routers/mapped_variant.py +++ b/src/mavedb/routers/mapped_variant.py @@ -2,11 +2,10 @@ from fastapi import APIRouter, Depends from fastapi.exceptions import HTTPException -from sqlalchemy.orm import Session from sqlalchemy.exc import MultipleResultsFound +from sqlalchemy.orm import Session from mavedb import deps - from mavedb.models.mapped_variant import MappedVariant from mavedb.models.variant import Variant from mavedb.view_models import mapped_variant diff --git a/src/mavedb/routers/orcid.py b/src/mavedb/routers/orcid.py index c6502bc5..53f4a090 100644 --- a/src/mavedb/routers/orcid.py +++ b/src/mavedb/routers/orcid.py @@ -2,15 +2,15 @@ import os from typing import Any -from fastapi import APIRouter, Depends, HTTPException import httpx +from fastapi import APIRouter, Depends, HTTPException from starlette.responses import JSONResponse from mavedb.lib.authorization import require_current_user -from mavedb.lib.orcid import fetch_orcid_user -from mavedb.models.user import User from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context +from mavedb.lib.orcid import fetch_orcid_user +from mavedb.models.user import User from mavedb.view_models import orcid logger = logging.getLogger(__name__) diff --git a/src/mavedb/routers/permissions.py b/src/mavedb/routers/permissions.py index 480621a1..02187a17 100644 --- a/src/mavedb/routers/permissions.py +++ b/src/mavedb/routers/permissions.py @@ -1,15 +1,15 @@ import logging from enum import Enum +from typing import Optional, Union from fastapi import APIRouter, Depends, HTTPException from sqlalchemy.orm import Session -from typing import Union, Optional from mavedb import deps -from mavedb.lib.authentication import get_current_user, UserData -from 
mavedb.lib.permissions import has_permission, Action +from mavedb.lib.authentication import UserData, get_current_user from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context +from mavedb.lib.permissions import Action, has_permission from mavedb.models.experiment import Experiment from mavedb.models.experiment_set import ExperimentSet from mavedb.models.score_set import ScoreSet @@ -30,11 +30,7 @@ class ModelName(str, Enum): score_set = "score-set" -@router.get( - "/user-is-permitted/{model_name}/{urn}/{action}", - status_code=200, - response_model=bool -) +@router.get("/user-is-permitted/{model_name}/{urn}/{action}", status_code=200, response_model=bool) async def check_permission( *, model_name: ModelName, diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index c15e5785..02022e40 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -3,21 +3,21 @@ from typing import Any, List, Optional import pandas as pd +import pydantic from arq import ArqRedis -from fastapi import APIRouter, Depends, File, status, UploadFile, Query +from fastapi import APIRouter, Depends, File, Query, UploadFile, status from fastapi.encoders import jsonable_encoder from fastapi.exceptions import HTTPException from fastapi.responses import StreamingResponse -import pydantic from sqlalchemy import or_ -from sqlalchemy.orm import Session from sqlalchemy.exc import MultipleResultsFound +from sqlalchemy.orm import Session from mavedb import deps from mavedb.lib.authentication import UserData from mavedb.lib.authorization import get_current_user, require_current_user, require_current_user_with_email from mavedb.lib.contributors import find_or_create_contributor -from mavedb.lib.exceptions import NonexistentOrcidUserError, ValidationError +from mavedb.lib.exceptions import MixedTargetError, NonexistentOrcidUserError, ValidationError from mavedb.lib.identifiers import ( create_external_gene_identifier_offset, find_or_create_doi_identifier, @@ -25,38 +25,38 @@ ) from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import ( + correlation_id_for_context, logging_context, save_to_logging_context, - correlation_id_for_context, ) from mavedb.lib.permissions import Action, assert_permission from mavedb.lib.score_sets import ( + csv_data_to_df, find_meta_analyses_for_experiment_sets, get_score_set_counts_as_csv, get_score_set_scores_as_csv, - search_score_sets as _search_score_sets, - csv_data_to_df, variants_to_csv_rows, ) +from mavedb.lib.score_sets import ( + search_score_sets as _search_score_sets, +) from mavedb.lib.taxonomies import find_or_create_taxonomy from mavedb.lib.urns import ( generate_experiment_set_urn, generate_experiment_urn, generate_score_set_urn, ) -from mavedb.lib.exceptions import MixedTargetError from mavedb.models.contributor import Contributor from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.experiment import Experiment from mavedb.models.license import License from mavedb.models.mapped_variant import MappedVariant from mavedb.models.score_set import ScoreSet -from mavedb.models.target_gene import TargetGene from mavedb.models.target_accession import TargetAccession -from mavedb.models.variant import Variant +from mavedb.models.target_gene import TargetGene from mavedb.models.target_sequence import TargetSequence -from mavedb.view_models import mapped_variant -from mavedb.view_models import score_set +from 
mavedb.models.variant import Variant +from mavedb.view_models import mapped_variant, score_set from mavedb.view_models.search import ScoreSetsSearch logger = logging.getLogger(__name__) diff --git a/src/mavedb/routers/statistics.py b/src/mavedb/routers/statistics.py index a6ef326e..ad720526 100644 --- a/src/mavedb/routers/statistics.py +++ b/src/mavedb/routers/statistics.py @@ -1,13 +1,15 @@ from enum import Enum +from typing import Union + from fastapi import APIRouter, Depends, HTTPException -from sqlalchemy import func, Table, select +from sqlalchemy import Table, func, select from sqlalchemy.orm import Session -from typing import Union from mavedb.deps import get_db from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.models.doi_identifier import DoiIdentifier -from mavedb.models.raw_read_identifier import RawReadIdentifier +from mavedb.models.ensembl_identifier import EnsemblIdentifier +from mavedb.models.ensembl_offset import EnsemblOffset from mavedb.models.experiment import ( Experiment, experiments_doi_identifiers_association_table, @@ -15,20 +17,19 @@ ) from mavedb.models.experiment_controlled_keyword import ExperimentControlledKeywordAssociation from mavedb.models.experiment_publication_identifier import ExperimentPublicationIdentifierAssociation -from mavedb.models.score_set_publication_identifier import ScoreSetPublicationIdentifierAssociation from mavedb.models.publication_identifier import PublicationIdentifier +from mavedb.models.raw_read_identifier import RawReadIdentifier +from mavedb.models.refseq_identifier import RefseqIdentifier +from mavedb.models.refseq_offset import RefseqOffset from mavedb.models.score_set import ( ScoreSet, score_sets_doi_identifiers_association_table, score_sets_raw_read_identifiers_association_table, ) -from mavedb.models.target_gene import TargetGene +from mavedb.models.score_set_publication_identifier import ScoreSetPublicationIdentifierAssociation from mavedb.models.target_accession import TargetAccession +from mavedb.models.target_gene import TargetGene from mavedb.models.target_sequence import TargetSequence -from mavedb.models.ensembl_identifier import EnsemblIdentifier -from mavedb.models.ensembl_offset import EnsemblOffset -from mavedb.models.refseq_identifier import RefseqIdentifier -from mavedb.models.refseq_offset import RefseqOffset from mavedb.models.taxonomy import Taxonomy from mavedb.models.uniprot_identifier import UniprotIdentifier from mavedb.models.uniprot_offset import UniprotOffset diff --git a/src/mavedb/routers/target_gene_identifiers.py b/src/mavedb/routers/target_gene_identifiers.py index 6a65bc56..f0df9b32 100644 --- a/src/mavedb/routers/target_gene_identifiers.py +++ b/src/mavedb/routers/target_gene_identifiers.py @@ -5,9 +5,9 @@ from sqlalchemy.orm import Session from mavedb import deps -from mavedb.view_models.search import TextSearch -from mavedb.view_models import external_gene_identifier from mavedb.lib.identifiers import EXTERNAL_GENE_IDENTIFIER_CLASSES +from mavedb.view_models import external_gene_identifier +from mavedb.view_models.search import TextSearch router = APIRouter( prefix="/api/v1/target-gene-identifiers", diff --git a/src/mavedb/routers/taxonomies.py b/src/mavedb/routers/taxonomies.py index 234b2cbf..84bd75a6 100644 --- a/src/mavedb/routers/taxonomies.py +++ b/src/mavedb/routers/taxonomies.py @@ -10,15 +10,13 @@ from mavedb.view_models import taxonomy from mavedb.view_models.search import TextSearch -router = APIRouter( - prefix='/api/v1/taxonomies', tags=['taxonomies'], 
responses={404: {'description': 'Not found'}} -) +router = APIRouter(prefix="/api/v1/taxonomies", tags=["taxonomies"], responses={404: {"description": "Not found"}}) -@router.get('/', status_code=200, response_model=List[taxonomy.Taxonomy], responses={404: {}}) +@router.get("/", status_code=200, response_model=List[taxonomy.Taxonomy], responses={404: {}}) def list_taxonomies( - *, - db: Session = Depends(deps.get_db), + *, + db: Session = Depends(deps.get_db), ) -> Any: """ List taxonomies. @@ -55,37 +53,33 @@ def list_taxonomy_common_names( return sorted(list(set(common_names))) -@router.get('/{item_id}', status_code=200, response_model=taxonomy.Taxonomy, responses={404: {}}) +@router.get("/{item_id}", status_code=200, response_model=taxonomy.Taxonomy, responses={404: {}}) def fetch_taxonomy( - *, - item_id: int, - db: Session = Depends(deps.get_db), + *, + item_id: int, + db: Session = Depends(deps.get_db), ) -> Any: """ Fetch a single taxonomy by ID. """ item = db.query(Taxonomy).filter(Taxonomy.id == item_id).first() if not item: - raise HTTPException( - status_code=404, detail=f'Taxonomy with ID {item_id} not found' - ) + raise HTTPException(status_code=404, detail=f"Taxonomy with ID {item_id} not found") return item -@router.get('/tax-id/{item_id}', status_code=200, response_model=taxonomy.Taxonomy, responses={404: {}}) +@router.get("/tax-id/{item_id}", status_code=200, response_model=taxonomy.Taxonomy, responses={404: {}}) def fetch_taxonomy_by_tax_id( - *, - item_id: int, - db: Session = Depends(deps.get_db), + *, + item_id: int, + db: Session = Depends(deps.get_db), ) -> Any: """ Fetch a single taxonomy by tax_id. """ item = db.query(Taxonomy).filter(Taxonomy.tax_id == item_id).first() if not item: - raise HTTPException( - status_code=404, detail=f'Taxonomy with tax_ID {item_id} not found' - ) + raise HTTPException(status_code=404, detail=f"Taxonomy with tax_ID {item_id} not found") return item @@ -103,7 +97,7 @@ async def search_taxonomies(search: TextSearch, db: Session = Depends(deps.get_d query = query.filter( or_( func.lower(Taxonomy.organism_name).contains(lower_search_text), - func.lower(Taxonomy.common_name).contains(lower_search_text) + func.lower(Taxonomy.common_name).contains(lower_search_text), ) ) else: diff --git a/src/mavedb/routers/users.py b/src/mavedb/routers/users.py index 9b69f8a3..d6b32d68 100644 --- a/src/mavedb/routers/users.py +++ b/src/mavedb/routers/users.py @@ -6,10 +6,10 @@ from mavedb import deps from mavedb.lib.authentication import UserData -from mavedb.lib.authorization import require_current_user, RoleRequirer +from mavedb.lib.authorization import RoleRequirer, require_current_user from mavedb.lib.logging import LoggedRoute from mavedb.lib.logging.context import logging_context, save_to_logging_context -from mavedb.lib.permissions import assert_permission, Action +from mavedb.lib.permissions import Action, assert_permission from mavedb.models.enums.user_role import UserRole from mavedb.models.user import User from mavedb.view_models import user diff --git a/src/mavedb/scripts/export_public_data.py b/src/mavedb/scripts/export_public_data.py index 7531d517..705e7981 100644 --- a/src/mavedb/scripts/export_public_data.py +++ b/src/mavedb/scripts/export_public_data.py @@ -24,11 +24,11 @@ and user details are limited to ORCID IDs and names of contributors to published data sets. 
""" -from datetime import datetime, timezone -from itertools import chain import json import logging import os +from datetime import datetime, timezone +from itertools import chain from typing import Callable, Iterable, TypeVar from zipfile import ZipFile diff --git a/src/mavedb/server_main.py b/src/mavedb/server_main.py index 90a83ad4..4651569d 100644 --- a/src/mavedb/server_main.py +++ b/src/mavedb/server_main.py @@ -2,6 +2,7 @@ import time import uvicorn +from eutils._internal.exceptions import EutilsRequestError # type: ignore from fastapi import FastAPI from fastapi.encoders import jsonable_encoder from fastapi.exceptions import RequestValidationError @@ -12,18 +13,19 @@ from starlette.requests import Request from starlette.responses import JSONResponse from starlette_context.plugins import CorrelationIdPlugin, RequestIdPlugin, UserAgentPlugin -from eutils._internal.exceptions import EutilsRequestError # type: ignore - -from mavedb.models import * # noqa: F403 from mavedb import __version__ +from mavedb.lib.exceptions import AmbiguousIdentifierError, MixedTargetError, NonexistentIdentifierError +from mavedb.lib.logging.canonical import log_request from mavedb.lib.logging.context import ( PopulatedRawContextMiddleware, - logging_context, format_raised_exception_info_as_dict, + logging_context, save_to_logging_context, ) -from mavedb.lib.logging.canonical import log_request +from mavedb.lib.permissions import PermissionException +from mavedb.lib.slack import send_slack_message +from mavedb.models import * # noqa: F403 from mavedb.routers import ( access_keys, api_information, @@ -37,17 +39,14 @@ orcid, permissions, publication_identifiers, - target_gene_identifiers, - taxonomies, raw_read_identifiers, score_sets, statistics, + target_gene_identifiers, target_genes, + taxonomies, users, ) -from mavedb.lib.exceptions import AmbiguousIdentifierError, NonexistentIdentifierError, MixedTargetError -from mavedb.lib.permissions import PermissionException -from mavedb.lib.slack import send_slack_message logger = logging.getLogger(__name__) diff --git a/src/mavedb/view_models/__init__.py b/src/mavedb/view_models/__init__.py index 4e4ea93e..2a7b6d45 100644 --- a/src/mavedb/view_models/__init__.py +++ b/src/mavedb/view_models/__init__.py @@ -1,6 +1,7 @@ -from pydantic.utils import GetterDict from typing import Any +from pydantic.utils import GetterDict + class PublicationIdentifiersGetter(GetterDict): """ diff --git a/src/mavedb/view_models/access_key.py b/src/mavedb/view_models/access_key.py index d965eb36..c5328eb9 100644 --- a/src/mavedb/view_models/access_key.py +++ b/src/mavedb/view_models/access_key.py @@ -1,8 +1,8 @@ from datetime import date from typing import Optional -from mavedb.view_models.base.base import BaseModel from mavedb.models.enums.user_role import UserRole +from mavedb.view_models.base.base import BaseModel class AccessKeyBase(BaseModel): diff --git a/src/mavedb/view_models/base/base.py b/src/mavedb/view_models/base/base.py index 47a377a8..a3a73ffd 100644 --- a/src/mavedb/view_models/base/base.py +++ b/src/mavedb/view_models/base/base.py @@ -1,5 +1,6 @@ from humps import camelize -from pydantic import BaseModel as PydanticBaseModel, validator +from pydantic import BaseModel as PydanticBaseModel +from pydantic import validator class BaseModel(PydanticBaseModel): diff --git a/src/mavedb/view_models/contributor.py b/src/mavedb/view_models/contributor.py index bb7f42ed..0e635080 100644 --- a/src/mavedb/view_models/contributor.py +++ b/src/mavedb/view_models/contributor.py @@ -1,6 
+1,5 @@ from typing import Optional - from mavedb.view_models.base.base import BaseModel diff --git a/src/mavedb/view_models/doi_identifier.py b/src/mavedb/view_models/doi_identifier.py index 6685d14b..21d5d1f3 100644 --- a/src/mavedb/view_models/doi_identifier.py +++ b/src/mavedb/view_models/doi_identifier.py @@ -1,7 +1,7 @@ import idutils -from mavedb.view_models.base.base import BaseModel, validator from mavedb.lib.validation.exceptions import ValidationError +from mavedb.view_models.base.base import BaseModel, validator class DoiIdentifierBase(BaseModel): diff --git a/src/mavedb/view_models/experiment_controlled_keyword.py b/src/mavedb/view_models/experiment_controlled_keyword.py index d509ca90..d3385d8b 100644 --- a/src/mavedb/view_models/experiment_controlled_keyword.py +++ b/src/mavedb/view_models/experiment_controlled_keyword.py @@ -1,9 +1,10 @@ -from mavedb.view_models.base.base import BaseModel -from mavedb.view_models import keyword -from mavedb.lib.validation import keywords +from typing import Optional from pydantic import root_validator -from typing import Optional + +from mavedb.lib.validation import keywords +from mavedb.view_models import keyword +from mavedb.view_models.base.base import BaseModel class ExperimentControlledKeywordBase(BaseModel): diff --git a/src/mavedb/view_models/external_gene_identifier.py b/src/mavedb/view_models/external_gene_identifier.py index 0c120c4e..6d4aee87 100644 --- a/src/mavedb/view_models/external_gene_identifier.py +++ b/src/mavedb/view_models/external_gene_identifier.py @@ -1,7 +1,7 @@ from typing import Optional -from mavedb.view_models.base.base import BaseModel, validator from mavedb.lib.validation import identifier as identifier_validator +from mavedb.view_models.base.base import BaseModel, validator class ExternalGeneIdentifierBase(BaseModel): diff --git a/src/mavedb/view_models/external_gene_identifier_offset.py b/src/mavedb/view_models/external_gene_identifier_offset.py index 10cfbe30..71f260b4 100644 --- a/src/mavedb/view_models/external_gene_identifier_offset.py +++ b/src/mavedb/view_models/external_gene_identifier_offset.py @@ -1,5 +1,5 @@ -from mavedb.view_models.base.base import BaseModel, validator from mavedb.view_models import external_gene_identifier +from mavedb.view_models.base.base import BaseModel, validator class ExternalGeneIdentifierOffsetBase(BaseModel): diff --git a/src/mavedb/view_models/keyword.py b/src/mavedb/view_models/keyword.py index d115402e..74133933 100644 --- a/src/mavedb/view_models/keyword.py +++ b/src/mavedb/view_models/keyword.py @@ -1,5 +1,6 @@ # See https://pydantic-docs.helpmanual.io/usage/postponed_annotations/#self-referencing-models from __future__ import annotations + from typing import Optional from mavedb.lib.validation import keywords @@ -11,6 +12,7 @@ class KeywordBase(BaseModel): Keywords may have key but no value if users don't choose anything from dropdown menu. TODO: Should modify it when we confirm the final controlled keyword list. 
""" + key: str value: Optional[str] vocabulary: Optional[str] @@ -31,11 +33,13 @@ def validate_key(cls, v): class KeywordCreate(KeywordBase): """View model for creating a new keyword.""" + pass class KeywordUpdate(KeywordBase): """View model for updating a keyword.""" + pass @@ -49,9 +53,11 @@ class Config: class Keyword(SavedKeyword): """Keyword view model for non-admin clients.""" + pass class AdminKeyword(SavedKeyword): """Keyword view model containing properties to return to admin clients.""" - id: int \ No newline at end of file + + id: int diff --git a/src/mavedb/view_models/mapped_variant.py b/src/mavedb/view_models/mapped_variant.py index 7bf98300..9406519c 100644 --- a/src/mavedb/view_models/mapped_variant.py +++ b/src/mavedb/view_models/mapped_variant.py @@ -1,5 +1,5 @@ -from typing import Any, Optional from datetime import date +from typing import Any, Optional from .base.base import BaseModel diff --git a/src/mavedb/view_models/publication_identifier.py b/src/mavedb/view_models/publication_identifier.py index ad062c23..032d81aa 100644 --- a/src/mavedb/view_models/publication_identifier.py +++ b/src/mavedb/view_models/publication_identifier.py @@ -1,9 +1,9 @@ -from mavedb.lib.validation.publication import validate_publication, validate_db_name -from mavedb.view_models.base.base import BaseModel, validator +import logging from typing import Optional -from mavedb.lib.identifiers import PublicationAuthors -import logging +from mavedb.lib.identifiers import PublicationAuthors +from mavedb.lib.validation.publication import validate_db_name, validate_publication +from mavedb.view_models.base.base import BaseModel, validator logger = logging.getLogger(__name__) diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py index 20e8e21c..ed95baf8 100644 --- a/src/mavedb/view_models/score_set.py +++ b/src/mavedb/view_models/score_set.py @@ -2,17 +2,17 @@ from __future__ import annotations from datetime import date -from pydantic import root_validator -from typing import Collection, Dict, Optional, Any, Sequence +from typing import Any, Collection, Dict, Optional, Sequence from humps import camelize +from pydantic import root_validator -from mavedb.lib.validation.constants.score_set import default_ranges from mavedb.lib.validation import urn_re +from mavedb.lib.validation.constants.score_set import default_ranges from mavedb.lib.validation.exceptions import ValidationError -from mavedb.lib.validation.utilities import is_null, inf_or_float -from mavedb.models.enums.processing_state import ProcessingState +from mavedb.lib.validation.utilities import inf_or_float, is_null from mavedb.models.enums.mapping_state import MappingState +from mavedb.models.enums.processing_state import ProcessingState from mavedb.view_models import PublicationIdentifiersGetter from mavedb.view_models.base.base import BaseModel, validator from mavedb.view_models.contributor import Contributor, ContributorCreate diff --git a/src/mavedb/view_models/target_gene.py b/src/mavedb/view_models/target_gene.py index 335a2ea3..c69f659f 100644 --- a/src/mavedb/view_models/target_gene.py +++ b/src/mavedb/view_models/target_gene.py @@ -4,11 +4,16 @@ from pydantic import root_validator from pydantic.utils import GetterDict +from mavedb.lib.validation import target from mavedb.view_models import external_gene_identifier_offset from mavedb.view_models.base.base import BaseModel, validator -from mavedb.view_models.target_sequence import AdminTargetSequence, TargetSequence, TargetSequenceCreate, 
SavedTargetSequence -from mavedb.view_models.target_accession import TargetAccession, TargetAccessionCreate, SavedTargetAccession -from mavedb.lib.validation import target +from mavedb.view_models.target_accession import SavedTargetAccession, TargetAccession, TargetAccessionCreate +from mavedb.view_models.target_sequence import ( + AdminTargetSequence, + SavedTargetSequence, + TargetSequence, + TargetSequenceCreate, +) class ExternalIdentifiersGetter(GetterDict): diff --git a/src/mavedb/view_models/target_sequence.py b/src/mavedb/view_models/target_sequence.py index 19e007ad..a56325cc 100644 --- a/src/mavedb/view_models/target_sequence.py +++ b/src/mavedb/view_models/target_sequence.py @@ -1,12 +1,12 @@ from datetime import date from typing import Optional -from mavedb.view_models.base.base import BaseModel, validator -from mavedb.view_models.taxonomy import AdminTaxonomy, SavedTaxonomy, Taxonomy, TaxonomyCreate +from fqfa import infer_sequence_type + from mavedb.lib.validation import target from mavedb.lib.validation.exceptions import ValidationError - -from fqfa import infer_sequence_type +from mavedb.view_models.base.base import BaseModel, validator +from mavedb.view_models.taxonomy import AdminTaxonomy, SavedTaxonomy, Taxonomy, TaxonomyCreate def sanitize_target_sequence_label(label: str): diff --git a/src/mavedb/view_models/taxonomy.py b/src/mavedb/view_models/taxonomy.py index 5a67f5fe..ed4c23e8 100644 --- a/src/mavedb/view_models/taxonomy.py +++ b/src/mavedb/view_models/taxonomy.py @@ -40,4 +40,4 @@ class Taxonomy(SavedTaxonomy): # Properties to return to admin clients class AdminTaxonomy(SavedTaxonomy): creation_date: date - modification_date: date \ No newline at end of file + modification_date: date diff --git a/src/mavedb/view_models/user.py b/src/mavedb/view_models/user.py index 9f6256c0..10818eba 100644 --- a/src/mavedb/view_models/user.py +++ b/src/mavedb/view_models/user.py @@ -1,10 +1,10 @@ from typing import Optional +from email_validator import EmailNotValidError, validate_email from pydantic import Field -from email_validator import validate_email, EmailNotValidError -from mavedb.models.enums.user_role import UserRole from mavedb.lib.validation.exceptions import ValidationError +from mavedb.models.enums.user_role import UserRole from mavedb.view_models.base.base import BaseModel, validator diff --git a/src/mavedb/worker/__init__.py b/src/mavedb/worker/__init__.py index 626f42c7..1f1489b5 100644 --- a/src/mavedb/worker/__init__.py +++ b/src/mavedb/worker/__init__.py @@ -1,7 +1,6 @@ from sqlalchemy.orm import configure_mappers from mavedb.models import * # noqa: F403 - from mavedb.worker.settings import ArqWorkerSettings # Scan all our model classes and create backref attributes. 
Otherwise, these attributes only get added to classes once diff --git a/src/mavedb/worker/jobs.py b/src/mavedb/worker/jobs.py index e1936073..fed261c8 100644 --- a/src/mavedb/worker/jobs.py +++ b/src/mavedb/worker/jobs.py @@ -2,37 +2,36 @@ import functools import logging from contextlib import asynccontextmanager -from datetime import timedelta, date +from datetime import date, timedelta from typing import Any, Optional - import pandas as pd from arq import ArqRedis from arq.jobs import Job, JobStatus from cdot.hgvs.dataproviders import RESTDataProvider -from sqlalchemy import cast, delete, select, null +from sqlalchemy import cast, delete, null, select from sqlalchemy.dialects.postgresql import JSONB from sqlalchemy.orm import Session -from mavedb.lib.exceptions import NonexistentMappingReferenceError, NonexistentMappingResultsError, MappingEnqueueError +from mavedb.data_providers.services import vrs_mapper +from mavedb.lib.exceptions import MappingEnqueueError, NonexistentMappingReferenceError, NonexistentMappingResultsError +from mavedb.lib.logging.context import format_raised_exception_info_as_dict from mavedb.lib.score_sets import ( columns_for_dataset, create_variants, create_variants_data, ) -from mavedb.lib.logging.context import format_raised_exception_info_as_dict from mavedb.lib.slack import send_slack_message -from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.dataframe import ( validate_and_standardize_dataframe_pair, ) +from mavedb.lib.validation.exceptions import ValidationError from mavedb.models.enums.mapping_state import MappingState from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.mapped_variant import MappedVariant from mavedb.models.score_set import ScoreSet from mavedb.models.user import User from mavedb.models.variant import Variant -from mavedb.data_providers.services import vrs_mapper logger = logging.getLogger(__name__) diff --git a/src/mavedb/worker/settings.py b/src/mavedb/worker/settings.py index 56c82386..d91e48b8 100644 --- a/src/mavedb/worker/settings.py +++ b/src/mavedb/worker/settings.py @@ -5,10 +5,10 @@ from arq.connections import RedisSettings from arq.cron import CronJob +from mavedb.data_providers.services import cdot_rest +from mavedb.db.session import SessionLocal from mavedb.lib.logging.canonical import log_job from mavedb.worker.jobs import create_variants_for_score_set, map_variants_for_score_set, variant_mapper_manager -from mavedb.db.session import SessionLocal -from mavedb.data_providers.services import cdot_rest # ARQ requires at least one task on startup. 
BACKGROUND_FUNCTIONS: list[Callable] = [ diff --git a/tests/conftest.py b/tests/conftest.py index b40e9934..b58a5dd9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,8 +8,8 @@ import cdot.hgvs.dataproviders import email_validator import pytest -import pytest_postgresql import pytest_asyncio +import pytest_postgresql from arq import ArqRedis from arq.worker import Worker from fakeredis import FakeServer @@ -23,15 +23,15 @@ from mavedb.db.base import Base from mavedb.deps import get_db, get_worker, hgvs_data_provider +from mavedb.lib.authentication import UserData, get_current_user from mavedb.lib.authorization import require_current_user -from mavedb.lib.authentication import get_current_user, UserData from mavedb.models.user import User from mavedb.server_main import app from mavedb.worker.jobs import create_variants_for_score_set, map_variants_for_score_set, variant_mapper_manager sys.path.append(".") -from tests.helpers.constants import TEST_USER, ADMIN_USER +from tests.helpers.constants import ADMIN_USER, TEST_USER # needs the pytest_postgresql plugin installed assert pytest_postgresql.factories diff --git a/tests/dump_rels.py b/tests/dump_rels.py index 9f54f48d..bf5059e1 100644 --- a/tests/dump_rels.py +++ b/tests/dump_rels.py @@ -18,11 +18,9 @@ from mavedb.models import * # noqa -mappers = ( - m for r in mapperlib._mapper_registries for m in r.mappers -) +mappers = (m for r in mapperlib._mapper_registries for m in r.mappers) for m in sorted(mappers, key=lambda m: m.entity.__name__): for r in sorted(m.relationships, key=lambda r: r.target.name): - opts = ' '.join(sorted(r.cascade)) + opts = " ".join(sorted(r.cascade)) print(f"{m.entity.__name__} {r.key} {r.target.name} {opts}") diff --git a/tests/helpers/constants.py b/tests/helpers/constants.py index d08559cc..580294b6 100644 --- a/tests/helpers/constants.py +++ b/tests/helpers/constants.py @@ -1,5 +1,7 @@ from datetime import date, datetime + from humps import camelize + from mavedb.models.enums.processing_state import ProcessingState TEST_PUBMED_IDENTIFIER = "20711194" diff --git a/tests/helpers/util.py b/tests/helpers/util.py index 4aef1373..cda39c99 100644 --- a/tests/helpers/util.py +++ b/tests/helpers/util.py @@ -6,12 +6,12 @@ from arq import ArqRedis from sqlalchemy import select -from mavedb.models.contributor import Contributor -from mavedb.models.user import User -from mavedb.models.score_set import ScoreSet as ScoreSetDbModel +from mavedb.lib.score_sets import columns_for_dataset, create_variants, create_variants_data, csv_data_to_df from mavedb.lib.validation.dataframe import validate_and_standardize_dataframe_pair +from mavedb.models.contributor import Contributor from mavedb.models.enums.processing_state import ProcessingState -from mavedb.lib.score_sets import create_variants_data, create_variants, csv_data_to_df, columns_for_dataset +from mavedb.models.score_set import ScoreSet as ScoreSetDbModel +from mavedb.models.user import User from mavedb.view_models.experiment import Experiment, ExperimentCreate from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate from tests.helpers.constants import ( diff --git a/tests/lib/conftest.py b/tests/lib/conftest.py index b83430a4..94860c35 100644 --- a/tests/lib/conftest.py +++ b/tests/lib/conftest.py @@ -1,11 +1,10 @@ import pytest +from mavedb.models.enums.user_role import UserRole from mavedb.models.license import License +from mavedb.models.role import Role from mavedb.models.taxonomy import Taxonomy from mavedb.models.user import User -from 
mavedb.models.role import Role -from mavedb.models.enums.user_role import UserRole - from tests.helpers.constants import ADMIN_USER, EXTRA_USER, TEST_LICENSE, TEST_TAXONOMY, TEST_USER diff --git a/tests/lib/test_authentication.py b/tests/lib/test_authentication.py index 16e35f52..d0c1aa0d 100644 --- a/tests/lib/test_authentication.py +++ b/tests/lib/test_authentication.py @@ -1,13 +1,12 @@ -import pytest +from unittest.mock import patch +import pytest from fastapi import HTTPException -from unittest.mock import patch -from mavedb.lib.authentication import get_current_user_data_from_api_key, get_current_user -from mavedb.models.user import User +from mavedb.lib.authentication import get_current_user, get_current_user_data_from_api_key from mavedb.models.enums.user_role import UserRole - -from tests.helpers.constants import TEST_USER, TEST_USER_DECODED_JWT, ADMIN_USER, ADMIN_USER_DECODED_JWT +from mavedb.models.user import User +from tests.helpers.constants import ADMIN_USER, ADMIN_USER_DECODED_JWT, TEST_USER, TEST_USER_DECODED_JWT from tests.helpers.util import create_api_key_for_current_user, mark_user_inactive diff --git a/tests/lib/test_score_set.py b/tests/lib/test_score_set.py index 7a99eb22..c4a98b6a 100644 --- a/tests/lib/test_score_set.py +++ b/tests/lib/test_score_set.py @@ -1,12 +1,10 @@ import io +import numpy as np import pandas as pd import pytest -import numpy as np from sqlalchemy import select -from mavedb.models.score_set import ScoreSet -from mavedb.models.variant import Variant from mavedb.lib.score_sets import ( HGVSColumns, columns_for_dataset, @@ -21,8 +19,9 @@ null_values_list, required_score_column, ) - -from tests.helpers.util import create_experiment, create_acc_score_set, create_seq_score_set +from mavedb.models.score_set import ScoreSet +from mavedb.models.variant import Variant +from tests.helpers.util import create_acc_score_set, create_experiment, create_seq_score_set def test_columns_for_dataset_no_dataset(): diff --git a/tests/routers/conftest.py b/tests/routers/conftest.py index 6f7f746d..5317ee0c 100644 --- a/tests/routers/conftest.py +++ b/tests/routers/conftest.py @@ -8,8 +8,8 @@ from mavedb.models.controlled_keyword import ControlledKeyword from mavedb.models.enums.user_role import UserRole from mavedb.models.license import License -from mavedb.models.taxonomy import Taxonomy from mavedb.models.role import Role +from mavedb.models.taxonomy import Taxonomy from mavedb.models.user import User from tests.helpers.constants import ( ADMIN_USER, diff --git a/tests/routers/test_access_keys.py b/tests/routers/test_access_keys.py index 79b053ed..4e266a0f 100644 --- a/tests/routers/test_access_keys.py +++ b/tests/routers/test_access_keys.py @@ -1,11 +1,9 @@ -from tests.helpers.constants import EXTRA_USER -from tests.helpers.dependency_overrider import DependencyOverrider - from mavedb.models.access_key import AccessKey from mavedb.models.enums.user_role import UserRole from mavedb.models.user import User - -from tests.helpers.util import create_api_key_for_current_user, create_admin_key_for_current_user +from tests.helpers.constants import EXTRA_USER +from tests.helpers.dependency_overrider import DependencyOverrider +from tests.helpers.util import create_admin_key_for_current_user, create_api_key_for_current_user def test_create_user_access_key(client, setup_router_db, session): diff --git a/tests/routers/test_experiments.py b/tests/routers/test_experiments.py index b7a9817d..fa390bfc 100644 --- a/tests/routers/test_experiments.py +++ 
b/tests/routers/test_experiments.py @@ -14,20 +14,13 @@ from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.view_models.experiment import Experiment, ExperimentCreate from mavedb.view_models.orcid import OrcidUser -from tests.helpers.util import ( - add_contributor, - change_ownership, - create_experiment, - create_seq_score_set, - create_seq_score_set_with_variants, -) from tests.helpers.constants import ( EXTRA_USER, TEST_BIORXIV_IDENTIFIER, TEST_CROSSREF_IDENTIFIER, TEST_EXPERIMENT_WITH_KEYWORD, - TEST_EXPERIMENT_WITH_KEYWORD_RESPONSE, TEST_EXPERIMENT_WITH_KEYWORD_HAS_DUPLICATE_OTHERS_RESPONSE, + TEST_EXPERIMENT_WITH_KEYWORD_RESPONSE, TEST_MEDRXIV_IDENTIFIER, TEST_MINIMAL_EXPERIMENT, TEST_MINIMAL_EXPERIMENT_RESPONSE, @@ -36,6 +29,13 @@ TEST_USER, ) from tests.helpers.dependency_overrider import DependencyOverrider +from tests.helpers.util import ( + add_contributor, + change_ownership, + create_experiment, + create_seq_score_set, + create_seq_score_set_with_variants, +) def test_test_minimal_experiment_is_valid(): diff --git a/tests/routers/test_hgvs.py b/tests/routers/test_hgvs.py index f6b394a0..092081bb 100644 --- a/tests/routers/test_hgvs.py +++ b/tests/routers/test_hgvs.py @@ -1,11 +1,10 @@ -import requests_mock +from unittest.mock import patch import cdot.hgvs.dataproviders +import requests_mock from hgvs.exceptions import HGVSDataNotAvailableError -from unittest.mock import patch - -from tests.helpers.constants import VALID_ACCESSION, VALID_GENE, TEST_CDOT_TRANSCRIPT +from tests.helpers.constants import TEST_CDOT_TRANSCRIPT, VALID_ACCESSION, VALID_GENE VALID_MAJOR_ASSEMBLY = "GRCh38" VALID_MINOR_ASSEMBLY = "GRCh38.p3" diff --git a/tests/routers/test_permissions.py b/tests/routers/test_permissions.py index c999ee22..83cddc53 100644 --- a/tests/routers/test_permissions.py +++ b/tests/routers/test_permissions.py @@ -1,3 +1,6 @@ +from mavedb.models.experiment import Experiment as ExperimentDbModel +from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel +from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from tests.helpers.constants import TEST_USER from tests.helpers.util import ( add_contributor, @@ -6,9 +9,6 @@ create_seq_score_set, create_seq_score_set_with_variants, ) -from mavedb.models.experiment import Experiment as ExperimentDbModel -from mavedb.models.experiment_set import ExperimentSet as ExperimentSetDbModel -from mavedb.models.score_set import ScoreSet as ScoreSetDbModel # Test check_authorization function diff --git a/tests/routers/test_score_set.py b/tests/routers/test_score_set.py index 09f37a29..44207f97 100644 --- a/tests/routers/test_score_set.py +++ b/tests/routers/test_score_set.py @@ -5,13 +5,13 @@ import jsonschema from arq import ArqRedis + from mavedb.lib.validation.urn_re import MAVEDB_TMP_URN_RE from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.experiment import Experiment as ExperimentDbModel from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.view_models.orcid import OrcidUser from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate - from tests.helpers.constants import ( EXTRA_USER, TEST_MINIMAL_ACC_SCORESET, @@ -20,6 +20,7 @@ TEST_ORCID_ID, TEST_USER, ) +from tests.helpers.dependency_overrider import DependencyOverrider from tests.helpers.util import ( add_contributor, change_ownership, @@ -27,7 +28,6 @@ create_seq_score_set, create_seq_score_set_with_variants, ) -from tests.helpers.dependency_overrider import 
DependencyOverrider def test_TEST_MINIMAL_SEQ_SCORESET_is_valid(): diff --git a/tests/routers/test_users.py b/tests/routers/test_users.py index c49cf4f9..8110ed72 100644 --- a/tests/routers/test_users.py +++ b/tests/routers/test_users.py @@ -1,12 +1,11 @@ -import pytest +from unittest import mock +import pytest -from mavedb.models.enums.user_role import UserRole from mavedb.lib.authentication import get_current_user from mavedb.lib.authorization import require_current_user -from unittest import mock - -from tests.helpers.constants import TEST_USER, ADMIN_USER, EXTRA_USER, camelize +from mavedb.models.enums.user_role import UserRole +from tests.helpers.constants import ADMIN_USER, EXTRA_USER, TEST_USER, camelize from tests.helpers.dependency_overrider import DependencyOverrider diff --git a/tests/validation/test_dataframe.py b/tests/validation/test_dataframe.py index c9676456..378cdd7d 100644 --- a/tests/validation/test_dataframe.py +++ b/tests/validation/test_dataframe.py @@ -1,13 +1,11 @@ import itertools from unittest import TestCase +from unittest.mock import patch +import cdot.hgvs.dataproviders import numpy as np import pandas as pd import pytest -import cdot.hgvs.dataproviders - -from unittest.mock import patch -from tests.helpers.constants import VALID_ACCESSION, TEST_CDOT_TRANSCRIPT from mavedb.lib.validation.constants.general import ( hgvs_nt_column, @@ -33,6 +31,7 @@ validate_variant_formatting, ) from mavedb.lib.validation.exceptions import ValidationError +from tests.helpers.constants import TEST_CDOT_TRANSCRIPT, VALID_ACCESSION @pytest.fixture diff --git a/tests/validation/test_identifier.py b/tests/validation/test_identifier.py index f960edb7..00527830 100644 --- a/tests/validation/test_identifier.py +++ b/tests/validation/test_identifier.py @@ -1,16 +1,16 @@ from unittest import TestCase +from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.identifier import ( + validate_ensembl_identifier, + validate_ensembl_list, + validate_genome_identifier, + validate_refseq_identifier, + validate_refseq_list, validate_sra_identifier, validate_sra_list, validate_uniprot_identifier, - validate_refseq_identifier, - validate_refseq_list, - validate_genome_identifier, - validate_ensembl_identifier, - validate_ensembl_list, ) -from mavedb.lib.validation.exceptions import ValidationError class TestGenomeValidators(TestCase): diff --git a/tests/validation/test_keywords.py b/tests/validation/test_keywords.py index 59f5df0a..1b286da9 100644 --- a/tests/validation/test_keywords.py +++ b/tests/validation/test_keywords.py @@ -1,13 +1,12 @@ from unittest import TestCase +from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.keywords import ( - validate_keyword, validate_description, validate_duplicates, - validate_keyword_keys + validate_keyword, + validate_keyword_keys, ) -from mavedb.lib.validation.exceptions import ValidationError - from mavedb.view_models.experiment_controlled_keyword import ExperimentControlledKeywordCreate from tests.helpers.constants import TEST_DESCRIPTION @@ -46,7 +45,7 @@ def test_duplicate_keys(self): "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -54,7 +53,7 @@ def test_duplicate_keys(self): "key": "Variant Library Creation Method", "value": "SaCas9", "special": False, - 
"description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2] @@ -64,19 +63,19 @@ def test_duplicate_keys(self): def test_duplicate_values(self): # Invalid keywords list. keyword1 = { - "key": "Variant Library Creation Method", - "value": "Endogenous locus library method", - "special": False, - "description": TEST_DESCRIPTION - } + "key": "Variant Library Creation Method", + "value": "Endogenous locus library method", + "special": False, + "description": TEST_DESCRIPTION, + } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) keyword2 = { - "key": "Endogenous Locus Library Method System", - "value": "Endogenous locus library method", - "special": False, - "description": TEST_DESCRIPTION - } + "key": "Endogenous Locus Library Method System", + "value": "Endogenous locus library method", + "special": False, + "description": TEST_DESCRIPTION, + } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2] with self.assertRaises(ValidationError): @@ -88,7 +87,7 @@ def test_duplicate_values_but_they_are_other(self): "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -96,7 +95,7 @@ def test_duplicate_values_but_they_are_other(self): "key": "Endogenous Locus Library Method System", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2] @@ -108,7 +107,7 @@ def test_variant_library_value_is_endogenous_and_another_keywords_keys_are_endog "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -116,7 +115,7 @@ def test_variant_library_value_is_endogenous_and_another_keywords_keys_are_endog "key": "Endogenous Locus Library Method System", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) @@ -124,7 +123,7 @@ def test_variant_library_value_is_endogenous_and_another_keywords_keys_are_endog "key": "Endogenous Locus Library Method Mechanism", "value": "Nuclease", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj3 = ExperimentControlledKeywordCreate(keyword=keyword3, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2, keyword_obj3] @@ -136,7 +135,7 @@ def test_variant_library_value_is_endogenous_but_another_keywords_keys_are_in_vi "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -144,7 +143,7 @@ def test_variant_library_value_is_endogenous_but_another_keywords_keys_are_in_vi "key": "In Vitro 
Construct Library Method System", "value": "Oligo-directed mutagenic PCR", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) @@ -152,7 +151,7 @@ def test_variant_library_value_is_endogenous_but_another_keywords_keys_are_in_vi "key": "In Vitro Construct Library Method Mechanism", "value": "Native locus replacement", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj3 = ExperimentControlledKeywordCreate(keyword=keyword3, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2, keyword_obj3] @@ -165,7 +164,7 @@ def test_variant_library_value_is_in_vitro_and_another_keywords_keys_are_both_in "key": "Variant Library Creation Method", "value": "In vitro construct library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -173,7 +172,7 @@ def test_variant_library_value_is_in_vitro_and_another_keywords_keys_are_both_in "key": "In Vitro Construct Library Method System", "value": "Oligo-directed mutagenic PCR", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) @@ -181,7 +180,7 @@ def test_variant_library_value_is_in_vitro_and_another_keywords_keys_are_both_in "key": "In Vitro Construct Library Method Mechanism", "value": "Native locus replacement", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj3 = ExperimentControlledKeywordCreate(keyword=keyword3, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2, keyword_obj3] @@ -193,7 +192,7 @@ def test_variant_library_value_is_in_vitro_but_another_keywords_keys_are_endogen "key": "Variant Library Creation Method", "value": "In vitro construct library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -201,7 +200,7 @@ def test_variant_library_value_is_in_vitro_but_another_keywords_keys_are_endogen "key": "Endogenous Locus Library Method System", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) @@ -209,7 +208,7 @@ def test_variant_library_value_is_in_vitro_but_another_keywords_keys_are_endogen "key": "Endogenous Locus Library Method Mechanism", "value": "Nuclease", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj3 = ExperimentControlledKeywordCreate(keyword=keyword3, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1, keyword_obj2, keyword_obj3] @@ -222,7 +221,7 @@ def test_variant_library_value_is_other(self): "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) keyword_list = [keyword_obj1] @@ -234,7 +233,7 @@ def test_variant_library_value_is_other_but_another_keyword_key_is_endogenous(se "key": "Variant Library Creation Method", "value": "Other", "special": 
False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -242,7 +241,7 @@ def test_variant_library_value_is_other_but_another_keyword_key_is_endogenous(se "key": "Endogenous Locus Library Method System", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) @@ -259,7 +258,7 @@ def test_variant_library_value_is_other_but_another_keyword_key_is_in_vitro(self "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj1 = ExperimentControlledKeywordCreate(keyword=keyword1, description=TEST_DESCRIPTION) @@ -267,7 +266,7 @@ def test_variant_library_value_is_other_but_another_keyword_key_is_in_vitro(self "key": "In Vitro Construct Library Method System", "value": "Oligo-directed mutagenic PCR", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj2 = ExperimentControlledKeywordCreate(keyword=keyword2, description=TEST_DESCRIPTION) diff --git a/tests/validation/test_publication.py b/tests/validation/test_publication.py index bca0abfa..f0a97cb9 100644 --- a/tests/validation/test_publication.py +++ b/tests/validation/test_publication.py @@ -1,16 +1,16 @@ from unittest import TestCase +from mavedb.lib.validation.constants.publication import valid_dbnames +from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.publication import ( - validate_publication, - validate_pubmed, - validate_biorxiv, - validate_medrxiv, identifier_valid_for, - validate_db_name, infer_identifier_from_url, + validate_biorxiv, + validate_db_name, + validate_medrxiv, + validate_publication, + validate_pubmed, ) -from mavedb.lib.validation.exceptions import ValidationError -from mavedb.lib.validation.constants.publication import valid_dbnames class TestValidateGenericPublication(TestCase): diff --git a/tests/validation/test_target.py b/tests/validation/test_target.py index 2ee9cb44..b1920bed 100644 --- a/tests/validation/test_target.py +++ b/tests/validation/test_target.py @@ -1,8 +1,8 @@ from unittest import TestCase -from mavedb.lib.validation.target import validate_sequence_category, validate_target_category, validate_target_sequence -from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.constants.target import valid_categories, valid_sequence_types +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.lib.validation.target import validate_sequence_category, validate_target_category, validate_target_sequence class TestValidateTargetCategory(TestCase): diff --git a/tests/validation/test_urn_re.py b/tests/validation/test_urn_re.py index a71d5517..ed3fb96a 100644 --- a/tests/validation/test_urn_re.py +++ b/tests/validation/test_urn_re.py @@ -1,16 +1,16 @@ import re + import pytest from mavedb.lib.validation.urn_re import ( - MAVEDB_TMP_URN_RE, + MAVEDB_ANY_URN_RE, MAVEDB_EXPERIMENT_SET_URN_RE, MAVEDB_EXPERIMENT_URN_RE, MAVEDB_SCORE_SET_URN_RE, + MAVEDB_TMP_URN_RE, MAVEDB_VARIANT_URN_RE, - MAVEDB_ANY_URN_RE, ) - VALID_TMP_URNS = ("tmp:2ba10de7-18e5-4f5c-adf6-e08a2f71277b",) VALID_EXPERIMENT_SET_URNS = ( "urn:mavedb:00000011", diff --git a/tests/validation/test_utilities.py b/tests/validation/test_utilities.py index 34e258f9..49854b3c 
100644 --- a/tests/validation/test_utilities.py +++ b/tests/validation/test_utilities.py @@ -1,10 +1,9 @@ from unittest import TestCase from mavedb.lib.validation.constants.general import null_values_list +from mavedb.lib.validation.utilities import construct_hgvs_pro, generate_hgvs, is_null from mavedb.lib.validation.variant import validate_hgvs_string # validate_pro_variant, validate_nt_variant -from mavedb.lib.validation.utilities import is_null, generate_hgvs, construct_hgvs_pro - class TestIsNull(TestCase): def test_valid_null_values(self): diff --git a/tests/validation/test_variant.py b/tests/validation/test_variant.py index b5c10f89..5913abcb 100644 --- a/tests/validation/test_variant.py +++ b/tests/validation/test_variant.py @@ -1,8 +1,8 @@ from unittest import TestCase -from mavedb.lib.validation.variant import validate_hgvs_string -from mavedb.lib.validation.exceptions import ValidationError from mavedb.lib.validation.constants.general import null_values_list +from mavedb.lib.validation.exceptions import ValidationError +from mavedb.lib.validation.variant import validate_hgvs_string class TestValidateHgvsString(TestCase): diff --git a/tests/view_models/test_external_gene_identifiers.py b/tests/view_models/test_external_gene_identifiers.py index a7e5fe65..5632975a 100644 --- a/tests/view_models/test_external_gene_identifiers.py +++ b/tests/view_models/test_external_gene_identifiers.py @@ -1,8 +1,8 @@ +import pytest + from mavedb.view_models.external_gene_identifier import ExternalGeneIdentifierCreate from mavedb.view_models.external_gene_identifier_offset import ExternalGeneIdentifierOffsetCreate -import pytest - def test_create_ensemble_identifier(client): # Test valid identifier diff --git a/tests/view_models/test_keyword.py b/tests/view_models/test_keyword.py index f8a2dc41..49bf783a 100644 --- a/tests/view_models/test_keyword.py +++ b/tests/view_models/test_keyword.py @@ -1,8 +1,8 @@ +import pytest + from mavedb.view_models.experiment_controlled_keyword import ExperimentControlledKeywordCreate from tests.helpers.constants import TEST_DESCRIPTION -import pytest - def test_create_keyword_with_description(): # Test valid keyword with description @@ -10,7 +10,7 @@ def test_create_keyword_with_description(): "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj = ExperimentControlledKeywordCreate(keyword=keyword, description=TEST_DESCRIPTION) assert keyword_obj.keyword.key == "Variant Library Creation Method" @@ -23,7 +23,7 @@ def test_create_keyword_without_description(): "key": "Variant Library Creation Method", "value": "Endogenous locus library method", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj = ExperimentControlledKeywordCreate(keyword=keyword, description=None) assert keyword_obj.keyword.key == "Variant Library Creation Method" @@ -36,7 +36,7 @@ def test_create_keyword_value_is_other(): "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + "description": TEST_DESCRIPTION, } keyword_obj = ExperimentControlledKeywordCreate(keyword=keyword, description=TEST_DESCRIPTION) assert keyword_obj.keyword.key == "Variant Library Creation Method" @@ -49,7 +49,7 @@ def test_create_keyword_value_is_other_without_description(): "key": "Variant Library Creation Method", "value": "Other", "special": False, - "description": TEST_DESCRIPTION + 
"description": TEST_DESCRIPTION, } with pytest.raises(ValueError) as exc_info: ExperimentControlledKeywordCreate(keyword=keyword, description=None) diff --git a/tests/view_models/test_publication_identifier.py b/tests/view_models/test_publication_identifier.py index 051705fc..b65f9110 100644 --- a/tests/view_models/test_publication_identifier.py +++ b/tests/view_models/test_publication_identifier.py @@ -1,6 +1,7 @@ -from mavedb.view_models.publication_identifier import PublicationIdentifierCreate import pytest +from mavedb.view_models.publication_identifier import PublicationIdentifierCreate + def test_publication_identifier_create_pubmed_validator(client): # Test valid pubmed identifier @@ -42,7 +43,9 @@ def test_invalid_publication_identifier_create_validator(client): invalid_identifier = "not_an_identifier" with pytest.raises(ValueError) as exc_info: PublicationIdentifierCreate(identifier=invalid_identifier) - assert "'not_an_identifier' is not a valid DOI or a valid PubMed, bioRxiv, or medRxiv identifier." in str(exc_info.value) + assert "'not_an_identifier' is not a valid DOI or a valid PubMed, bioRxiv, or medRxiv identifier." in str( + exc_info.value + ) def test_invalid_publication_identifier_date_part_create_validator(client): @@ -50,4 +53,6 @@ def test_invalid_publication_identifier_date_part_create_validator(client): invalid_identifier = "2018.12.12.207222" with pytest.raises(ValueError) as exc_info: PublicationIdentifierCreate(identifier=invalid_identifier) - assert "'2018.12.12.207222' is not a valid DOI or a valid PubMed, bioRxiv, or medRxiv identifier." in str(exc_info.value) + assert "'2018.12.12.207222' is not a valid DOI or a valid PubMed, bioRxiv, or medRxiv identifier." in str( + exc_info.value + ) diff --git a/tests/view_models/test_score_set.py b/tests/view_models/test_score_set.py index 368d199f..c55d5349 100644 --- a/tests/view_models/test_score_set.py +++ b/tests/view_models/test_score_set.py @@ -1,12 +1,10 @@ import pytest - from fastapi.encoders import jsonable_encoder from mavedb.lib.validation.constants.score_set import default_ranges +from mavedb.view_models.publication_identifier import PublicationIdentifierCreate from mavedb.view_models.score_set import ScoreSetCreate, ScoreSetModify from mavedb.view_models.target_gene import TargetGeneCreate -from mavedb.view_models.publication_identifier import PublicationIdentifierCreate - from tests.helpers.constants import TEST_MINIMAL_SEQ_SCORESET @@ -14,9 +12,7 @@ def test_cannot_create_score_set_without_a_target(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() with pytest.raises(ValueError) as exc_info: - ScoreSetModify( - **jsonable_encoder(score_set_test, exclude={"targetGenes"}), target_genes=[] - ) + ScoreSetModify(**jsonable_encoder(score_set_test, exclude={"targetGenes"}), target_genes=[]) assert "Score sets should define at least one target." 
in str(exc_info.value) @@ -31,27 +27,18 @@ def test_cannot_create_score_set_with_multiple_primary_publications(): ScoreSetModify( **jsonable_encoder(score_set_test), exclude={"targetGenes"}, - target_genes=[ - TargetGeneCreate(**jsonable_encoder(target)) - for target in score_set_test["targetGenes"] - ], + target_genes=[TargetGeneCreate(**jsonable_encoder(target)) for target in score_set_test["targetGenes"]], primary_publication_identifiers=[identifier_one, identifier_two], ) - assert "multiple primary publication identifiers are not allowed" in str( - exc_info.value - ) + assert "multiple primary publication identifiers are not allowed" in str(exc_info.value) def test_cannot_create_score_set_without_target_gene_labels_when_multiple_targets_exist(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() - target_gene_one = TargetGeneCreate( - **jsonable_encoder(score_set_test["targetGenes"][0]) - ) - target_gene_two = TargetGeneCreate( - **jsonable_encoder(score_set_test["targetGenes"][0]) - ) + target_gene_one = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) + target_gene_two = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) with pytest.raises(ValueError) as exc_info: ScoreSetModify( @@ -59,21 +46,14 @@ def test_cannot_create_score_set_without_target_gene_labels_when_multiple_target target_genes=[target_gene_one, target_gene_two], ) - assert ( - "Target sequence labels cannot be empty when multiple targets are defined." - in str(exc_info.value) - ) + assert "Target sequence labels cannot be empty when multiple targets are defined." in str(exc_info.value) def test_cannot_create_score_set_with_non_unique_target_labels(): score_set_test = TEST_MINIMAL_SEQ_SCORESET.copy() - target_gene_one = TargetGeneCreate( - **jsonable_encoder(score_set_test["targetGenes"][0]) - ) - target_gene_two = TargetGeneCreate( - **jsonable_encoder(score_set_test["targetGenes"][0]) - ) + target_gene_one = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) + target_gene_two = TargetGeneCreate(**jsonable_encoder(score_set_test["targetGenes"][0])) non_unique = "BRCA1" target_gene_one.target_sequence.label = non_unique @@ -473,9 +453,7 @@ def test_cannot_create_score_set_without_default_range(present_name): with pytest.raises(ValueError) as exc_info: ScoreSetModify(**jsonable_encoder(score_set_test)) - assert "Both `normal` and `abnormal` ranges must be provided." in str( - exc_info.value - ) + assert "Both `normal` and `abnormal` ranges must be provided." 
in str(exc_info.value) def test_cannot_create_score_set_without_default_ranges(): diff --git a/tests/view_models/test_target_gene.py b/tests/view_models/test_target_gene.py index 29f5a8dd..564a27fd 100644 --- a/tests/view_models/test_target_gene.py +++ b/tests/view_models/test_target_gene.py @@ -1,7 +1,7 @@ -from mavedb.view_models.target_gene import TargetGeneCreate - import pytest +from mavedb.view_models.target_gene import TargetGeneCreate + def test_create_target_gene_with_sequence(): name = "UBE2I" diff --git a/tests/view_models/test_target_sequence.py b/tests/view_models/test_target_sequence.py index d2bb5696..c1b32716 100644 --- a/tests/view_models/test_target_sequence.py +++ b/tests/view_models/test_target_sequence.py @@ -1,8 +1,7 @@ -from mavedb.view_models.target_sequence import TargetSequenceCreate, sanitize_target_sequence_label -from tests.helpers.constants import TEST_TAXONOMY - import pytest +from mavedb.view_models.target_sequence import TargetSequenceCreate, sanitize_target_sequence_label +from tests.helpers.constants import TEST_TAXONOMY SEQUENCE = ( "ATGAGTATTCAACATTTCCGTGTCGCCCTTATTCCCTTTTTTGCGGCATTTTGCCTTCCTGTTTTTGCTCACCCAGAAACGCTGGTGAAAGTAAAAGATGCT" diff --git a/tests/view_models/test_user.py b/tests/view_models/test_user.py index 89a2691c..b72d0d5d 100644 --- a/tests/view_models/test_user.py +++ b/tests/view_models/test_user.py @@ -1,8 +1,8 @@ import pytest - from fastapi.encoders import jsonable_encoder -from tests.helpers.constants import TEST_USER + from mavedb.view_models.user import CurrentUserUpdate +from tests.helpers.constants import TEST_USER # There are lots of potentially invalid emails, but this test is intented to ensure diff --git a/tests/view_models/test_wild_type_sequence.py b/tests/view_models/test_wild_type_sequence.py index 6247aab0..25415fc1 100644 --- a/tests/view_models/test_wild_type_sequence.py +++ b/tests/view_models/test_wild_type_sequence.py @@ -1,23 +1,24 @@ -from mavedb.view_models.target_sequence import TargetSequenceCreate -from mavedb.view_models.taxonomy import TaxonomyCreate +import datetime import pytest -import datetime +from mavedb.view_models.target_sequence import TargetSequenceCreate +from mavedb.view_models.taxonomy import TaxonomyCreate taxonomy = TaxonomyCreate( - id = 1, - tax_id = 1, - organism_name = "Organism", - common_name = "Common name", - rank = "Rank", - has_described_species_name = False, - url = "url", - article_reference = "article_reference", - creation_date = datetime.datetime.now(), - modification_date = datetime.datetime.now(), + id=1, + tax_id=1, + organism_name="Organism", + common_name="Common name", + rank="Rank", + has_described_species_name=False, + url="url", + article_reference="article_reference", + creation_date=datetime.datetime.now(), + modification_date=datetime.datetime.now(), ) + @pytest.mark.parametrize( "sequence_type, sequence", [ diff --git a/tests/worker/conftest.py b/tests/worker/conftest.py index bf6b062b..3a2a4cf6 100644 --- a/tests/worker/conftest.py +++ b/tests/worker/conftest.py @@ -1,13 +1,13 @@ from pathlib import Path from shutil import copytree + import pytest from mavedb.models.license import License from mavedb.models.taxonomy import Taxonomy from mavedb.models.user import User - +from tests.helpers.constants import EXTRA_USER, TEST_LICENSE, TEST_TAXONOMY, TEST_USER from tests.helpers.util import create_experiment, create_seq_score_set -from tests.helpers.constants import TEST_USER, EXTRA_USER, TEST_TAXONOMY, TEST_LICENSE @pytest.fixture diff --git a/tests/worker/test_jobs.py 
b/tests/worker/test_jobs.py index 372dd701..18e0846a 100644 --- a/tests/worker/test_jobs.py +++ b/tests/worker/test_jobs.py @@ -1,9 +1,8 @@ -from datetime import date - from asyncio.unix_events import _UnixSelectorEventLoop from copy import deepcopy -from uuid import uuid4 +from datetime import date from unittest.mock import patch +from uuid import uuid4 import arq.jobs import cdot.hgvs.dataproviders @@ -11,30 +10,27 @@ import pandas as pd import pytest from arq import ArqRedis -from sqlalchemy import not_ +from sqlalchemy import not_, select from mavedb.data_providers.services import VRSMap from mavedb.lib.mave.constants import HGVS_NT_COLUMN from mavedb.lib.score_sets import csv_data_to_df from mavedb.lib.validation.exceptions import ValidationError -from mavedb.models.enums.processing_state import ProcessingState from mavedb.models.enums.mapping_state import MappingState +from mavedb.models.enums.processing_state import ProcessingState +from mavedb.models.mapped_variant import MappedVariant from mavedb.models.score_set import ScoreSet as ScoreSetDbModel from mavedb.models.variant import Variant -from mavedb.models.mapped_variant import MappedVariant from mavedb.view_models.experiment import Experiment, ExperimentCreate from mavedb.view_models.score_set import ScoreSet, ScoreSetCreate from mavedb.worker.jobs import ( + BACKOFF_LIMIT, + MAPPING_CURRENT_ID_NAME, + MAPPING_QUEUE_NAME, create_variants_for_score_set, map_variants_for_score_set, variant_mapper_manager, - MAPPING_QUEUE_NAME, - MAPPING_CURRENT_ID_NAME, - BACKOFF_LIMIT, ) -from sqlalchemy import select - -from tests.helpers.util import awaitable_exception from tests.helpers.constants import ( TEST_CDOT_TRANSCRIPT, TEST_MINIMAL_ACC_SCORESET, @@ -43,6 +39,7 @@ TEST_VARIANT_MAPPING_SCAFFOLD, VALID_ACCESSION, ) +from tests.helpers.util import awaitable_exception async def setup_records_and_files(async_client, data_files, input_score_set): From c76ed5187e1d6605ee8b0a151f44a4265fd017d3 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Fri, 11 Oct 2024 15:37:15 -0700 Subject: [PATCH 56/58] Lock Dependencies --- poetry.lock | 110 +--------------------------------------------------- 1 file changed, 2 insertions(+), 108 deletions(-) diff --git a/poetry.lock b/poetry.lock index a946956f..38fcd06a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -218,52 +218,6 @@ dev = ["bandit (>=1.7,<2.0)", "black (>=22.3,<23.0)", "build (>=0.8,<1.0)", "fla docs = ["mkdocs"] test = ["pytest (>=7.1,<8.0)", "pytest-cov (>=4.0,<5.0)", "pytest-optional-tests", "tox (>=3.25,<4.0)", "vcrpy"] -[[package]] -name = "black" -version = "24.10.0" -description = "The uncompromising code formatter." 
-optional = false -python-versions = ">=3.9" -files = [ - {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, - {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, - {file = "black-24.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:649fff99a20bd06c6f727d2a27f401331dc0cc861fb69cde910fe95b01b5928f"}, - {file = "black-24.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe4d6476887de70546212c99ac9bd803d90b42fc4767f058a0baa895013fbb3e"}, - {file = "black-24.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5a2221696a8224e335c28816a9d331a6c2ae15a2ee34ec857dcf3e45dbfa99ad"}, - {file = "black-24.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f9da3333530dbcecc1be13e69c250ed8dfa67f43c4005fb537bb426e19200d50"}, - {file = "black-24.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4007b1393d902b48b36958a216c20c4482f601569d19ed1df294a496eb366392"}, - {file = "black-24.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:394d4ddc64782e51153eadcaaca95144ac4c35e27ef9b0a42e121ae7e57a9175"}, - {file = "black-24.10.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b5e39e0fae001df40f95bd8cc36b9165c5e2ea88900167bddf258bacef9bbdc3"}, - {file = "black-24.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d37d422772111794b26757c5b55a3eade028aa3fde43121ab7b673d050949d65"}, - {file = "black-24.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:14b3502784f09ce2443830e3133dacf2c0110d45191ed470ecb04d0f5f6fcb0f"}, - {file = "black-24.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:30d2c30dc5139211dda799758559d1b049f7f14c580c409d6ad925b74a4208a8"}, - {file = "black-24.10.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1cbacacb19e922a1d75ef2b6ccaefcd6e93a2c05ede32f06a21386a04cedb981"}, - {file = "black-24.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1f93102e0c5bb3907451063e08b9876dbeac810e7da5a8bfb7aeb5a9ef89066b"}, - {file = "black-24.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ddacb691cdcdf77b96f549cf9591701d8db36b2f19519373d60d31746068dbf2"}, - {file = "black-24.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:680359d932801c76d2e9c9068d05c6b107f2584b2a5b88831c83962eb9984c1b"}, - {file = "black-24.10.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:17374989640fbca88b6a448129cd1745c5eb8d9547b464f281b251dd00155ccd"}, - {file = "black-24.10.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:63f626344343083322233f175aaf372d326de8436f5928c042639a4afbbf1d3f"}, - {file = "black-24.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfa1d0cb6200857f1923b602f978386a3a2758a65b52e0950299ea014be6800"}, - {file = "black-24.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:2cd9c95431d94adc56600710f8813ee27eea544dd118d45896bb734e9d7a0dc7"}, - {file = "black-24.10.0-py3-none-any.whl", hash = "sha256:3bb2b7a1f7b685f85b11fed1ef10f8a9148bceb49853e47a294a3dd963c1dd7d"}, - {file = "black-24.10.0.tar.gz", hash = "sha256:846ea64c97afe3bc677b761787993be4991810ecc7a4a937816dd6bddedc4875"}, -] - -[package.dependencies] -click = ">=8.0.0" -mypy-extensions = ">=0.4.3" -packaging = ">=22.0" -pathspec = ">=0.9.0" -platformdirs = ">=2" -tomli = {version = ">=1.1.0", 
markers = "python_version < \"3.11\""} -typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""} - -[package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.10)"] -jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] -uvloop = ["uvloop (>=0.15.2)"] - [[package]] name = "boto3" version = "1.34.162" @@ -982,7 +936,7 @@ files = [ name = "click" version = "8.1.7" description = "Composable command line interface toolkit" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, @@ -1342,22 +1296,6 @@ docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-autodoc-typehints (>=2. testing = ["covdefaults (>=2.3)", "coverage (>=7.6.1)", "diff-cover (>=9.2)", "pytest (>=8.3.3)", "pytest-asyncio (>=0.24)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.26.4)"] typing = ["typing-extensions (>=4.12.2)"] -[[package]] -name = "flake8" -version = "7.1.1" -description = "the modular source code checker: pep8 pyflakes and co" -optional = false -python-versions = ">=3.8.1" -files = [ - {file = "flake8-7.1.1-py2.py3-none-any.whl", hash = "sha256:597477df7860daa5aa0fdd84bf5208a043ab96b8e96ab708770ae0364dd03213"}, - {file = "flake8-7.1.1.tar.gz", hash = "sha256:049d058491e228e03e67b390f311bbf88fce2dbaa8fa673e7aea87b7198b8d38"}, -] - -[package.dependencies] -mccabe = ">=0.7.0,<0.8.0" -pycodestyle = ">=2.12.0,<2.13.0" -pyflakes = ">=3.2.0,<3.3.0" - [[package]] name = "fqfa" version = "1.3.1" @@ -2219,17 +2157,6 @@ fqfa = ">=1.2.3" [package.extras] dev = ["black", "flake8", "pre-commit", "pytest"] -[[package]] -name = "mccabe" -version = "0.7.0" -description = "McCabe checker, plugin for flake8" -optional = false -python-versions = ">=3.6" -files = [ - {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, - {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, -] - [[package]] name = "mirakuru" version = "2.5.3" @@ -2470,17 +2397,6 @@ files = [ qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["docopt", "pytest"] -[[package]] -name = "pathspec" -version = "0.12.1" -description = "Utility library for gitignore style pattern matching of file paths." 
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"},
-    {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"},
-]
-
 [[package]]
 name = "pexpect"
 version = "4.9.0"
@@ -2679,17 +2595,6 @@ files = [
     {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"},
 ]
 
-[[package]]
-name = "pycodestyle"
-version = "2.12.1"
-description = "Python style guide checker"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "pycodestyle-2.12.1-py2.py3-none-any.whl", hash = "sha256:46f0fb92069a7c28ab7bb558f05bfc0110dac69a0cd23c61ea0040283a9d78b3"},
-    {file = "pycodestyle-2.12.1.tar.gz", hash = "sha256:6838eae08bbce4f6accd5d5572075c63626a15ee3e6f842df996bf62f6d73521"},
-]
-
 [[package]]
 name = "pycparser"
 version = "2.22"
@@ -2760,17 +2665,6 @@ typing-extensions = ">=4.2.0"
 dotenv = ["python-dotenv (>=0.10.4)"]
 email = ["email-validator (>=1.0.3)"]
 
-[[package]]
-name = "pyflakes"
-version = "3.2.0"
-description = "passive checker of Python programs"
-optional = false
-python-versions = ">=3.8"
-files = [
-    {file = "pyflakes-3.2.0-py2.py3-none-any.whl", hash = "sha256:84b5be138a2dfbb40689ca07e2152deb896a65c3a3e24c251c5c62489568074a"},
-    {file = "pyflakes-3.2.0.tar.gz", hash = "sha256:1c61603ff154621fb2a9172037d84dca3500def8c8b630657d1701f026f8af3f"},
-]
-
 [[package]]
 name = "pygments"
 version = "2.18.0"
@@ -4226,4 +4120,4 @@ server = ["alembic", "arq", "authlib", "biocommons", "boto3", "cdot", "cryptogra
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.9"
-content-hash = "ba27041cc125b2646364feaa3b04821f6f786da99e2cc7047f4ffa4a0b144ce3"
+content-hash = "f5a4cedf018200abbbb7eebf9d2a51110454c5dac959d3ab0601bc185e2a351c"

From 2d1c953349d4bcf034cde8da6d5b0253efa9dc42 Mon Sep 17 00:00:00 2001
From: Ben Capodanno
Date: Fri, 11 Oct 2024 15:47:08 -0700
Subject: [PATCH 57/58] Bump Version

---
 pyproject.toml         | 2 +-
 src/mavedb/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 99171429..98e8a828 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "mavedb"
-version = "2024.4.0"
+version = "2024.4.1"
 description = "API for MaveDB, the database of Multiplexed Assays of Variant Effect."
license = "AGPL-3.0-only" readme = "README.md" diff --git a/src/mavedb/__init__.py b/src/mavedb/__init__.py index 7308262e..5ad78a08 100644 --- a/src/mavedb/__init__.py +++ b/src/mavedb/__init__.py @@ -6,6 +6,6 @@ logger = module_logging.getLogger(__name__) __project__ = "mavedb-api" -__version__ = "2024.4.0" +__version__ = "2024.4.1" logger.info(f"MaveDB {__version__}") From db5b952c2b464cc66556b3fdb9e9aff827cbb171 Mon Sep 17 00:00:00 2001 From: Ben Capodanno Date: Wed, 16 Oct 2024 13:28:10 -0700 Subject: [PATCH 58/58] Fixed #336: Score ranges were persisting despite being removed from updated item --- src/mavedb/routers/score_sets.py | 2 ++ src/mavedb/view_models/score_set.py | 20 ++++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/mavedb/routers/score_sets.py b/src/mavedb/routers/score_sets.py index 02022e40..1746e703 100644 --- a/src/mavedb/routers/score_sets.py +++ b/src/mavedb/routers/score_sets.py @@ -732,6 +732,8 @@ async def update_score_set( if item_update.score_ranges: item.score_ranges = item_update.score_ranges.dict() + else: + item.score_ranges = None # Delete the old target gene, WT sequence, and reference map. These will be deleted when we set the score set's # target_gene to None, because we have set cascade='all,delete-orphan' on ScoreSet.target_gene. (Since the diff --git a/src/mavedb/view_models/score_set.py b/src/mavedb/view_models/score_set.py index ed95baf8..0b201113 100644 --- a/src/mavedb/view_models/score_set.py +++ b/src/mavedb/view_models/score_set.py @@ -179,7 +179,10 @@ def at_least_one_target_gene_exists(cls, field_value, values): return field_value @validator("score_ranges") - def score_range_labels_must_be_unique(cls, field_value: ScoreRanges): + def score_range_labels_must_be_unique(cls, field_value: Optional[ScoreRanges]): + if field_value is None: + return None + existing_labels = [] for i, range_model in enumerate(field_value.ranges): range_model.label = range_model.label.strip() @@ -195,7 +198,10 @@ def score_range_labels_must_be_unique(cls, field_value: ScoreRanges): return field_value @validator("score_ranges") - def ranges_contain_normal_and_abnormal(cls, field_value: ScoreRanges): + def ranges_contain_normal_and_abnormal(cls, field_value: Optional[ScoreRanges]): + if field_value is None: + return None + ranges = set([range_model.classification for range_model in field_value.ranges]) if not set(default_ranges).issubset(ranges): raise ValidationError( @@ -207,7 +213,7 @@ def ranges_contain_normal_and_abnormal(cls, field_value: ScoreRanges): return field_value @validator("score_ranges") - def ranges_do_not_overlap(cls, field_value: ScoreRanges): + def ranges_do_not_overlap(cls, field_value: Optional[ScoreRanges]): def test_overlap(tp1, tp2) -> bool: # Always check the tuple with the lowest lower bound. 
             # overlaps in this manner, checking the overlap of (0,1) and (1,2) will
@@ -226,6 +232,9 @@ def test_overlap(tp1, tp2) -> bool:
 
             return False
 
+        if field_value is None:
+            return None
+
         for i, range_test in enumerate(field_value.ranges):
             for range_check in list(field_value.ranges)[i + 1 :]:
                 if test_overlap(range_test.range, range_check.range):
@@ -237,7 +246,10 @@ def test_overlap(tp1, tp2) -> bool:
         return field_value
 
     @validator("score_ranges")
-    def wild_type_score_in_normal_range(cls, field_value: ScoreRanges):
+    def wild_type_score_in_normal_range(cls, field_value: Optional[ScoreRanges]):
+        if field_value is None:
+            return None
+
         normal_ranges = [
             range_model.range for range_model in field_value.ranges if range_model.classification == "normal"
         ]
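
Editor's note: the sketch below is a minimal, self-contained illustration (hypothetical names; not part of the patch) of the two behaviors PATCH 58/58 pins down. An update payload without score ranges must clear the stored value instead of letting the previous version's ranges persist, and each score_ranges validator must pass None through now that the field is optional.

from typing import Optional

class ScoreSetRow:
    """Hypothetical stand-in for the ORM score set row."""
    def __init__(self) -> None:
        self.score_ranges: Optional[dict] = {"ranges": [{"label": "normal"}]}

def apply_update(item: ScoreSetRow, update_score_ranges: Optional[dict]) -> None:
    # Mirrors the router fix: the else-branch is what was missing, so a
    # payload without score ranges now clears the column rather than
    # leaving the old ranges in place.
    if update_score_ranges:
        item.score_ranges = update_score_ranges
    else:
        item.score_ranges = None

def validate_score_ranges(field_value: Optional[dict]) -> Optional[dict]:
    # Mirrors the view-model change: every validator short-circuits on
    # None, because an absent value is now legal and must stay absent.
    if field_value is None:
        return None
    # ...label, classification, and overlap checks would run here...
    return field_value

row = ScoreSetRow()
apply_update(row, None)
assert row.score_ranges is None  # stale ranges no longer persist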
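
The truncated comment inside ranges_do_not_overlap describes comparing each pair of ranges starting from the interval with the lowest lower bound, so that touching ranges such as (0, 1) and (1, 2) are not flagged as overlapping. The patch does not show the full function body, so the following standalone sketch is one plausible reading of that check, not the shipped implementation:

def test_overlap(tp1: tuple, tp2: tuple) -> bool:
    # Order the pair by lower bound first; comparing from the lower
    # interval means a shared endpoint, e.g. (0, 1) vs (1, 2), is not
    # reported as an overlap.
    low, high = sorted([tp1, tp2], key=lambda t: t[0])
    return high[0] < low[1]

assert test_overlap((0, 2), (1, 3)) is True
assert test_overlap((0, 1), (1, 2)) is False  # touching, not overlapping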