From 8de0beab93923bdfa803ecad36da98bb0ca6618b Mon Sep 17 00:00:00 2001 From: hamshkhawar Date: Mon, 8 Apr 2024 08:04:34 -0500 Subject: [PATCH 1/6] added new tabular plugins --- .bumpversion.cfg | 23 + .flake8 | 4 + .gitignore | 21 +- .pre-commit-config.yaml | 30 +- CODEOWNERS | 1 + Jenkinsfile | 75 + README.md | 0 VERSION | 1 + .../k-means-clustering-tool/.bumpversion.cfg | 34 + .../k-means-clustering-tool/CHANGELOG.md | 13 + clustering/k-means-clustering-tool/Dockerfile | 20 + clustering/k-means-clustering-tool/README.md | 65 + clustering/k-means-clustering-tool/VERSION | 1 + .../k-means-clustering-tool/build-docker.sh | 4 + clustering/k-means-clustering-tool/ict.yaml | 97 + .../k-meansclustering.cwl | 48 + .../k-means-clustering-tool/plugin.json | 114 + .../k-means-clustering-tool/pyproject.toml | 34 + .../k-means-clustering-tool/run-plugin.sh | 29 + .../tabular/clustering/k_means/__init__.py | 3 + .../tabular/clustering/k_means/__main__.py | 119 + .../tabular/clustering/k_means/k_means.py | 215 + .../polus/tabular/clustering/k_means/utils.py | 12 + .../k-means-clustering-tool/tests/__init__.py | 1 + .../k-means-clustering-tool/tests/conftest.py | 91 + .../tests/test_main.py | 142 + .../outlier-removal-tool/.bumpversion.cfg | 35 + clustering/outlier-removal-tool/.dockerignore | 4 + clustering/outlier-removal-tool/.gitignore | 23 + clustering/outlier-removal-tool/CHANGELOG.md | 15 + clustering/outlier-removal-tool/Dockerfile | 20 + clustering/outlier-removal-tool/README.md | 52 + clustering/outlier-removal-tool/VERSION | 1 + .../outlier-removal-tool/build-docker.sh | 4 + clustering/outlier-removal-tool/ict.yaml | 77 + .../outlier-removal-tool/images/Global.PNG | Bin 0 -> 147882 bytes .../outlier-removal-tool/images/Local.PNG | Bin 0 -> 221024 bytes .../outlier-removal-tool/outlierremoval.cwl | 40 + .../outlier-removal-tool/package-release.sh | 16 + clustering/outlier-removal-tool/plugin.json | 116 + .../outlier-removal-tool/pyproject.toml | 32 + 
clustering/outlier-removal-tool/run-docker.sh | 19 + .../clustering/outlier_removal/__init__.py | 3 + .../clustering/outlier_removal/__main__.py | 115 + .../outlier_removal/outlier_removal.py | 135 + .../outlier-removal-tool/tests/__init__.py | 1 + .../outlier-removal-tool/tests/conftest.py | 54 + .../outlier-removal-tool/tests/test_cli.py | 59 + .../tests/test_outlier_removal.py | 46 + .../Dockerfile | 24 + .../polus-feature-subsetting-plugin/README.md | 56 + .../polus-feature-subsetting-plugin/VERSION | 1 + .../build-docker.sh | 4 + .../featuresubsetting.cwl | 60 + .../polus-feature-subsetting-plugin/ict.yaml | 123 + .../plugin.json | 139 + .../src/main.py | 288 + .../src/requirements.txt | 2 + .../Dockerfile | 10 + .../polus-hdbscan-clustering-plugin/README.md | 48 + .../polus-hdbscan-clustering-plugin/VERSION | 1 + .../build-docker.sh | 4 + .../hdbscanclustering.cwl | 44 + .../polus-hdbscan-clustering-plugin/ict.yaml | 82 + .../plugin.json | 89 + .../run-docker.sh | 25 + .../src/main.py | 176 + .../src/requirements.txt | 2 + .../.bumpversion.cfg | 27 + .../feature-segmentation-eval-tool/Dockerfile | 20 + .../feature-segmentation-eval-tool/README.md | 28 + .../feature-segmentation-eval-tool/VERSION | 1 + .../build-docker.sh | 4 + .../feature-segmentation-eval-tool/ict.yaml | 83 + .../plugin.json | 94 + .../pyproject.toml | 35 + .../run-plugin.sh | 22 + .../feature_segmentation_eval/__init__.py | 4 + .../feature_segmentation_eval/__main__.py | 118 + .../feature_evaluation.py | 468 + .../feature_segmentation_eval/metrics.py | 247 + .../tests/__init__.py | 1 + .../tests/conftest.py | 90 + .../tests/test_cli.py | 41 + .../tests/test_feature_single.py | 40 + .../polus-csv-statistics-plugin/Dockerfile | 17 + .../polus-csv-statistics-plugin/README.md | 37 + features/polus-csv-statistics-plugin/VERSION | 1 + .../build-docker.sh | 4 + features/polus-csv-statistics-plugin/ict.yaml | 60 + .../polus-csv-statistics-plugin/plugin.json | 146 + 
.../polus-csv-statistics-plugin/src/main.py | 309 + .../src/requirements.txt | 1 + .../arrow-to-tabular-tool/.bumpversion.cfg | 27 + formats/arrow-to-tabular-tool/.gitignore | 175 + formats/arrow-to-tabular-tool/Dockerfile | 20 + formats/arrow-to-tabular-tool/README.md | 29 + formats/arrow-to-tabular-tool/VERSION | 1 + .../arrow-to-tabular-tool/arrowtotabular.cwl | 28 + formats/arrow-to-tabular-tool/build-docker.sh | 4 + formats/arrow-to-tabular-tool/ict.yaml | 45 + .../arrow-to-tabular-tool/package-release.sh | 13 + formats/arrow-to-tabular-tool/plugin.json | 59 + formats/arrow-to-tabular-tool/pyproject.toml | 32 + formats/arrow-to-tabular-tool/run-plugin.sh | 25 + .../formats/arrow_to_tabular/__init__.py | 4 + .../formats/arrow_to_tabular/__main__.py | 111 + .../arrow_to_tabular/arrow_to_tabular.py | 53 + .../arrow-to-tabular-tool/tests/__init__.py | 1 + .../arrow-to-tabular-tool/tests/test_main.py | 69 + .../Dockerfile | 8 + .../README.md | 31 + .../polus-fcs-to-csv-converter-plugin/VERSION | 1 + .../build-docker.sh | 4 + .../fcstocsvfileconverter.cwl | 24 + .../ict.yaml | 30 + .../plugin.json | 34 + .../src/main.py | 78 + .../src/requirements.txt | 1 + .../tabular-converter-tool/.bumpversion.cfg | 27 + formats/tabular-converter-tool/.gitignore | 175 + formats/tabular-converter-tool/Dockerfile | 20 + formats/tabular-converter-tool/README.md | 41 + formats/tabular-converter-tool/VERSION | 1 + .../tabular-converter-tool/build-docker.sh | 4 + formats/tabular-converter-tool/ict.yaml | 61 + .../tabular-converter-tool/package-release.sh | 13 + formats/tabular-converter-tool/plugin.json | 75 + formats/tabular-converter-tool/pyproject.toml | 36 + formats/tabular-converter-tool/run-plugin.sh | 26 + .../formats/tabular_converter/__init__.py | 5 + .../formats/tabular_converter/__main__.py | 99 + .../tabular_converter/tabular_converter.py | 158 + .../tabularconverter.cwl | 32 + .../tabular-converter-tool/tests/__init__.py | 1 + .../tabular-converter-tool/tests/test_main.py 
| 173 + .../tabular-to-arrow-tool/.bumpversion.cfg | 27 + formats/tabular-to-arrow-tool/.gitignore | 175 + formats/tabular-to-arrow-tool/Dockerfile | 20 + formats/tabular-to-arrow-tool/README.md | 34 + formats/tabular-to-arrow-tool/VERSION | 1 + formats/tabular-to-arrow-tool/build-docker.sh | 4 + formats/tabular-to-arrow-tool/ict.yaml | 48 + .../tabular-to-arrow-tool/package-release.sh | 16 + formats/tabular-to-arrow-tool/plugin.json | 60 + formats/tabular-to-arrow-tool/pyproject.toml | 33 + formats/tabular-to-arrow-tool/run-plugin.sh | 24 + .../formats/tabular_to_arrow/__init__.py | 7 + .../formats/tabular_to_arrow/__main__.py | 98 + .../tabular_arrow_converter.py | 131 + .../tabular-to-arrow-tool/tabulartoarrow.cwl | 28 + .../tabular-to-arrow-tool/tests/__init__.py | 1 + .../tabular-to-arrow-tool/tests/test_main.py | 138 + noxfile.py | 26 + package.json | 30 + pyproject.toml | 63 + to_clt.py | 108 + to_ict.py | 99 + transforms/polus-csv-merger-plugin/README.md | 35 + transforms/polus-csv-merger-plugin/VERSION | 1 + .../polus-csv-merger-plugin/build-docker.sh | 4 + .../polus-csv-merger-plugin/csvmerger.cwl | 36 + transforms/polus-csv-merger-plugin/ict.yaml | 68 + .../polus-csv-merger-plugin/plugin.json | 77 + .../polus-csv-merger-plugin/run-plugin.sh | 28 + .../polus-csv-merger-plugin/src/main.py | 242 + .../src/requirements.txt | 3 + .../Dockerfile | 20 + .../README.md | 54 + .../VERSION | 1 + .../build-docker.sh | 4 + .../generalizedlinearmodel.cwl | 32 + .../ict.yaml | 86 + .../plugin.json | 98 + .../src/Requirements.R | 9 + .../src/main.R | 237 + .../.bumpversion.cfg | 27 + .../tabular-thresholding-tool/.gitignore | 175 + .../tabular-thresholding-tool/Dockerfile | 20 + .../tabular-thresholding-tool/README.md | 47 + transforms/tabular-thresholding-tool/VERSION | 1 + .../tabular-thresholding-tool/build-docker.sh | 2 + transforms/tabular-thresholding-tool/ict.yaml | 146 + .../package-release.sh | 13 + .../tabular-thresholding-tool/plugin.json | 177 + 
.../tabular-thresholding-tool/pyproject.toml | 34 + .../tabular-thresholding-tool/run-plugin.sh | 41 + .../tabular/tabular_thresholding/__init__.py | 4 + .../tabular/tabular_thresholding/__main__.py | 158 + .../tabular_thresholding.py | 169 + .../thresholding/__init__.py | 6 + .../thresholding/custom_fpr.py | 36 + .../thresholding/n_sigma.py | 18 + .../tabular_thresholding/thresholding/otsu.py | 45 + .../tabular-thresholding-plugin.cwl | 60 + .../tests/__init_.py | 1 + .../tests/test_main.py | 144 + utils/filepattern-generator-plugin/Dockerfile | 9 + utils/filepattern-generator-plugin/README.md | 36 + utils/filepattern-generator-plugin/VERSION | 1 + .../build-docker.sh | 4 + .../bumpversion.cfg | 10 + .../filepatterngenerator.cwl | 36 + utils/filepattern-generator-plugin/ict.yaml | 64 + .../package-release.sh | 16 + .../filepattern-generator-plugin/plugin.json | 74 + .../run-plugin.sh | 27 + .../filepattern-generator-plugin/src/main.py | 204 + .../src/requirements.txt | 1 + .../tests/test_main.py | 43 + .../tests/version_test.py | 46 + utils/polus-csv-collection-merger/Dockerfile | 4 + utils/polus-csv-collection-merger/README.md | 45 + utils/polus-csv-collection-merger/VERSION | 1 + .../csvcollectionsmerger.cwl | 28 + utils/polus-csv-collection-merger/ict.yaml | 61 + utils/polus-csv-collection-merger/plugin.json | 61 + utils/polus-csv-collection-merger/script.sh | 61 + utils/polus-python-template/.bumpversion.cfg | 23 + utils/polus-python-template/.gitignore | 1 + utils/polus-python-template/CHANGELOG.md | 9 + utils/polus-python-template/README.md | 112 + utils/polus-python-template/VERSION | 1 + utils/polus-python-template/cookiecutter.json | 16 + .../hooks/post_gen_project.py | 63 + .../hooks/pre_gen_project.py | 55 + utils/polus-python-template/pyproject.toml | 32 + .../.bumpversion.cfg | 29 + .../.dockerignore | 4 + .../.gitignore | 1 + .../CHANGELOG.md | 5 + .../Dockerfile | 26 + .../{{cookiecutter.container_name}}/README.md | 23 + 
.../{{cookiecutter.container_name}}/VERSION | 1 + .../build-docker.sh | 4 + .../plugin.json | 63 + .../pyproject.toml | 32 + .../run-plugin.sh | 20 + .../__init__.py | 7 + .../__main__.py | 87 + .../{{ cookiecutter.package_name }}.py | 16 + .../tests/__init__.py | 1 + .../tests/conftest.py | 147 + .../tests/test_cli.py | 96 + .../test_{{cookiecutter.package_name}}.py | 22 + .../Dockerfile | 4 + .../README.md | 29 + .../VERSION | 1 + .../build-docker.sh | 4 + .../ict.yaml | 70 + .../main.py | 77 + .../mergestitchingvector.cwl | 40 + .../plugin.json | 78 + utils/rxiv-download-tool/.bumpversion.cfg | 29 + utils/rxiv-download-tool/.dockerignore | 4 + utils/rxiv-download-tool/.gitignore | 2 + utils/rxiv-download-tool/Dockerfile | 20 + utils/rxiv-download-tool/README.md | 30 + utils/rxiv-download-tool/VERSION | 1 + utils/rxiv-download-tool/build-docker.sh | 4 + .../downloadrxivtextdata.cwl | 32 + .../examples/arXiv_20231218_0.json | 182 + .../examples/arXiv_20231218_0.xml | 127 + utils/rxiv-download-tool/ict.yaml | 51 + utils/rxiv-download-tool/plugin.json | 70 + utils/rxiv-download-tool/pyproject.toml | 41 + utils/rxiv-download-tool/run-plugin.sh | 20 + .../images/utils/rxiv_download/__init__.py | 3 + .../images/utils/rxiv_download/__main__.py | 72 + .../polus/images/utils/rxiv_download/fetch.py | 217 + utils/rxiv-download-tool/tests/__init__.py | 1 + utils/rxiv-download-tool/tests/conftest.py | 41 + utils/rxiv-download-tool/tests/test_cli.py | 51 + utils/rxiv-download-tool/tests/test_fetch.py | 43 + .../Dockerfile | 24 + .../README.md | 40 + .../VERSION | 1 + .../build-docker.sh | 4 + .../graphpyramidbuilding.cwl | 32 + .../ict.yaml | 63 + .../plugin.json | 72 + .../src/dl_fi.py | 2 + .../src/main.py | 806 + .../src/requirements.txt | 4 + .../.bumpversion.cfg | 27 + .../tabular-to-microjson-tool/Dockerfile | 20 + .../tabular-to-microjson-tool/README.md | 62 + .../tabular-to-microjson-tool/VERSION | 1 + .../tabular-to-microjson-tool/build-docker.sh | 4 + 
.../examples/example_overlay_Point.json | 9255 +++++++++ .../examples/example_overlay_Polygon.json | 16935 ++++++++++++++++ .../tabular-to-microjson-tool/ict.yaml | 80 + .../tabular-to-microjson-tool/plugin.json | 96 + .../tabular-to-microjson-tool/pyproject.toml | 30 + .../tabular-to-microjson-tool/run-plugin.sh | 28 + .../tabular_to_microjson/__init__.py | 2 + .../tabular_to_microjson/__main__.py | 124 + .../tabular_to_microjson/microjson_overlay.py | 413 + .../tabulartomicrojson.cwl | 44 + .../tests/__init__.py | 1 + .../tests/test_microjson_overlay.py | 244 + 301 files changed, 42001 insertions(+), 9 deletions(-) create mode 100644 .bumpversion.cfg create mode 100644 .flake8 create mode 100644 CODEOWNERS create mode 100644 Jenkinsfile create mode 100644 README.md create mode 100644 VERSION create mode 100644 clustering/k-means-clustering-tool/.bumpversion.cfg create mode 100644 clustering/k-means-clustering-tool/CHANGELOG.md create mode 100644 clustering/k-means-clustering-tool/Dockerfile create mode 100644 clustering/k-means-clustering-tool/README.md create mode 100644 clustering/k-means-clustering-tool/VERSION create mode 100644 clustering/k-means-clustering-tool/build-docker.sh create mode 100644 clustering/k-means-clustering-tool/ict.yaml create mode 100644 clustering/k-means-clustering-tool/k-meansclustering.cwl create mode 100644 clustering/k-means-clustering-tool/plugin.json create mode 100644 clustering/k-means-clustering-tool/pyproject.toml create mode 100644 clustering/k-means-clustering-tool/run-plugin.sh create mode 100644 clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/__init__.py create mode 100644 clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/__main__.py create mode 100644 clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/k_means.py create mode 100644 clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/utils.py create mode 100644 
clustering/k-means-clustering-tool/tests/__init__.py create mode 100644 clustering/k-means-clustering-tool/tests/conftest.py create mode 100644 clustering/k-means-clustering-tool/tests/test_main.py create mode 100644 clustering/outlier-removal-tool/.bumpversion.cfg create mode 100644 clustering/outlier-removal-tool/.dockerignore create mode 100644 clustering/outlier-removal-tool/.gitignore create mode 100644 clustering/outlier-removal-tool/CHANGELOG.md create mode 100644 clustering/outlier-removal-tool/Dockerfile create mode 100644 clustering/outlier-removal-tool/README.md create mode 100644 clustering/outlier-removal-tool/VERSION create mode 100644 clustering/outlier-removal-tool/build-docker.sh create mode 100644 clustering/outlier-removal-tool/ict.yaml create mode 100644 clustering/outlier-removal-tool/images/Global.PNG create mode 100644 clustering/outlier-removal-tool/images/Local.PNG create mode 100644 clustering/outlier-removal-tool/outlierremoval.cwl create mode 100644 clustering/outlier-removal-tool/package-release.sh create mode 100644 clustering/outlier-removal-tool/plugin.json create mode 100644 clustering/outlier-removal-tool/pyproject.toml create mode 100644 clustering/outlier-removal-tool/run-docker.sh create mode 100644 clustering/outlier-removal-tool/src/polus/tabular/clustering/outlier_removal/__init__.py create mode 100644 clustering/outlier-removal-tool/src/polus/tabular/clustering/outlier_removal/__main__.py create mode 100644 clustering/outlier-removal-tool/src/polus/tabular/clustering/outlier_removal/outlier_removal.py create mode 100644 clustering/outlier-removal-tool/tests/__init__.py create mode 100644 clustering/outlier-removal-tool/tests/conftest.py create mode 100644 clustering/outlier-removal-tool/tests/test_cli.py create mode 100644 clustering/outlier-removal-tool/tests/test_outlier_removal.py create mode 100644 clustering/polus-feature-subsetting-plugin/Dockerfile create mode 100644 
clustering/polus-feature-subsetting-plugin/README.md create mode 100644 clustering/polus-feature-subsetting-plugin/VERSION create mode 100644 clustering/polus-feature-subsetting-plugin/build-docker.sh create mode 100644 clustering/polus-feature-subsetting-plugin/featuresubsetting.cwl create mode 100644 clustering/polus-feature-subsetting-plugin/ict.yaml create mode 100644 clustering/polus-feature-subsetting-plugin/plugin.json create mode 100644 clustering/polus-feature-subsetting-plugin/src/main.py create mode 100644 clustering/polus-feature-subsetting-plugin/src/requirements.txt create mode 100644 clustering/polus-hdbscan-clustering-plugin/Dockerfile create mode 100644 clustering/polus-hdbscan-clustering-plugin/README.md create mode 100644 clustering/polus-hdbscan-clustering-plugin/VERSION create mode 100755 clustering/polus-hdbscan-clustering-plugin/build-docker.sh create mode 100644 clustering/polus-hdbscan-clustering-plugin/hdbscanclustering.cwl create mode 100644 clustering/polus-hdbscan-clustering-plugin/ict.yaml create mode 100644 clustering/polus-hdbscan-clustering-plugin/plugin.json create mode 100755 clustering/polus-hdbscan-clustering-plugin/run-docker.sh create mode 100644 clustering/polus-hdbscan-clustering-plugin/src/main.py create mode 100644 clustering/polus-hdbscan-clustering-plugin/src/requirements.txt create mode 100644 features/feature-segmentation-eval-tool/.bumpversion.cfg create mode 100644 features/feature-segmentation-eval-tool/Dockerfile create mode 100644 features/feature-segmentation-eval-tool/README.md create mode 100644 features/feature-segmentation-eval-tool/VERSION create mode 100644 features/feature-segmentation-eval-tool/build-docker.sh create mode 100644 features/feature-segmentation-eval-tool/ict.yaml create mode 100644 features/feature-segmentation-eval-tool/plugin.json create mode 100644 features/feature-segmentation-eval-tool/pyproject.toml create mode 100644 features/feature-segmentation-eval-tool/run-plugin.sh create mode 
100644 features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__init__.py create mode 100644 features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__main__.py create mode 100644 features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py create mode 100644 features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py create mode 100644 features/feature-segmentation-eval-tool/tests/__init__.py create mode 100644 features/feature-segmentation-eval-tool/tests/conftest.py create mode 100644 features/feature-segmentation-eval-tool/tests/test_cli.py create mode 100644 features/feature-segmentation-eval-tool/tests/test_feature_single.py create mode 100644 features/polus-csv-statistics-plugin/Dockerfile create mode 100644 features/polus-csv-statistics-plugin/README.md create mode 100644 features/polus-csv-statistics-plugin/VERSION create mode 100755 features/polus-csv-statistics-plugin/build-docker.sh create mode 100644 features/polus-csv-statistics-plugin/ict.yaml create mode 100644 features/polus-csv-statistics-plugin/plugin.json create mode 100644 features/polus-csv-statistics-plugin/src/main.py create mode 100644 features/polus-csv-statistics-plugin/src/requirements.txt create mode 100644 formats/arrow-to-tabular-tool/.bumpversion.cfg create mode 100644 formats/arrow-to-tabular-tool/.gitignore create mode 100644 formats/arrow-to-tabular-tool/Dockerfile create mode 100644 formats/arrow-to-tabular-tool/README.md create mode 100644 formats/arrow-to-tabular-tool/VERSION create mode 100644 formats/arrow-to-tabular-tool/arrowtotabular.cwl create mode 100755 formats/arrow-to-tabular-tool/build-docker.sh create mode 100644 formats/arrow-to-tabular-tool/ict.yaml create mode 100755 formats/arrow-to-tabular-tool/package-release.sh create mode 100644 formats/arrow-to-tabular-tool/plugin.json create mode 100644 
formats/arrow-to-tabular-tool/pyproject.toml create mode 100755 formats/arrow-to-tabular-tool/run-plugin.sh create mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/__init__.py create mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/__main__.py create mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py create mode 100644 formats/arrow-to-tabular-tool/tests/__init__.py create mode 100644 formats/arrow-to-tabular-tool/tests/test_main.py create mode 100644 formats/polus-fcs-to-csv-converter-plugin/Dockerfile create mode 100644 formats/polus-fcs-to-csv-converter-plugin/README.md create mode 100644 formats/polus-fcs-to-csv-converter-plugin/VERSION create mode 100644 formats/polus-fcs-to-csv-converter-plugin/build-docker.sh create mode 100644 formats/polus-fcs-to-csv-converter-plugin/fcstocsvfileconverter.cwl create mode 100644 formats/polus-fcs-to-csv-converter-plugin/ict.yaml create mode 100644 formats/polus-fcs-to-csv-converter-plugin/plugin.json create mode 100644 formats/polus-fcs-to-csv-converter-plugin/src/main.py create mode 100644 formats/polus-fcs-to-csv-converter-plugin/src/requirements.txt create mode 100644 formats/tabular-converter-tool/.bumpversion.cfg create mode 100644 formats/tabular-converter-tool/.gitignore create mode 100644 formats/tabular-converter-tool/Dockerfile create mode 100644 formats/tabular-converter-tool/README.md create mode 100644 formats/tabular-converter-tool/VERSION create mode 100644 formats/tabular-converter-tool/build-docker.sh create mode 100644 formats/tabular-converter-tool/ict.yaml create mode 100644 formats/tabular-converter-tool/package-release.sh create mode 100644 formats/tabular-converter-tool/plugin.json create mode 100644 formats/tabular-converter-tool/pyproject.toml create mode 100644 formats/tabular-converter-tool/run-plugin.sh create mode 100644 
formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__init__.py create mode 100644 formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__main__.py create mode 100644 formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py create mode 100644 formats/tabular-converter-tool/tabularconverter.cwl create mode 100644 formats/tabular-converter-tool/tests/__init__.py create mode 100644 formats/tabular-converter-tool/tests/test_main.py create mode 100644 formats/tabular-to-arrow-tool/.bumpversion.cfg create mode 100644 formats/tabular-to-arrow-tool/.gitignore create mode 100644 formats/tabular-to-arrow-tool/Dockerfile create mode 100644 formats/tabular-to-arrow-tool/README.md create mode 100644 formats/tabular-to-arrow-tool/VERSION create mode 100755 formats/tabular-to-arrow-tool/build-docker.sh create mode 100644 formats/tabular-to-arrow-tool/ict.yaml create mode 100755 formats/tabular-to-arrow-tool/package-release.sh create mode 100644 formats/tabular-to-arrow-tool/plugin.json create mode 100644 formats/tabular-to-arrow-tool/pyproject.toml create mode 100755 formats/tabular-to-arrow-tool/run-plugin.sh create mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/__init__.py create mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/__main__.py create mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py create mode 100644 formats/tabular-to-arrow-tool/tabulartoarrow.cwl create mode 100644 formats/tabular-to-arrow-tool/tests/__init__.py create mode 100644 formats/tabular-to-arrow-tool/tests/test_main.py create mode 100644 noxfile.py create mode 100644 package.json create mode 100644 pyproject.toml create mode 100644 to_clt.py create mode 100644 to_ict.py create mode 100644 transforms/polus-csv-merger-plugin/README.md create mode 100644 
transforms/polus-csv-merger-plugin/VERSION create mode 100755 transforms/polus-csv-merger-plugin/build-docker.sh create mode 100644 transforms/polus-csv-merger-plugin/csvmerger.cwl create mode 100644 transforms/polus-csv-merger-plugin/ict.yaml create mode 100644 transforms/polus-csv-merger-plugin/plugin.json create mode 100644 transforms/polus-csv-merger-plugin/run-plugin.sh create mode 100644 transforms/polus-csv-merger-plugin/src/main.py create mode 100644 transforms/polus-csv-merger-plugin/src/requirements.txt create mode 100644 transforms/polus-generalized-linear-model-plugin/Dockerfile create mode 100644 transforms/polus-generalized-linear-model-plugin/README.md create mode 100644 transforms/polus-generalized-linear-model-plugin/VERSION create mode 100644 transforms/polus-generalized-linear-model-plugin/build-docker.sh create mode 100644 transforms/polus-generalized-linear-model-plugin/generalizedlinearmodel.cwl create mode 100644 transforms/polus-generalized-linear-model-plugin/ict.yaml create mode 100644 transforms/polus-generalized-linear-model-plugin/plugin.json create mode 100644 transforms/polus-generalized-linear-model-plugin/src/Requirements.R create mode 100644 transforms/polus-generalized-linear-model-plugin/src/main.R create mode 100644 transforms/tabular-thresholding-tool/.bumpversion.cfg create mode 100644 transforms/tabular-thresholding-tool/.gitignore create mode 100644 transforms/tabular-thresholding-tool/Dockerfile create mode 100644 transforms/tabular-thresholding-tool/README.md create mode 100644 transforms/tabular-thresholding-tool/VERSION create mode 100644 transforms/tabular-thresholding-tool/build-docker.sh create mode 100644 transforms/tabular-thresholding-tool/ict.yaml create mode 100644 transforms/tabular-thresholding-tool/package-release.sh create mode 100644 transforms/tabular-thresholding-tool/plugin.json create mode 100644 transforms/tabular-thresholding-tool/pyproject.toml create mode 100755 
transforms/tabular-thresholding-tool/run-plugin.sh create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__init__.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__main__.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/__init__.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/custom_fpr.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/n_sigma.py create mode 100644 transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/otsu.py create mode 100644 transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl create mode 100644 transforms/tabular-thresholding-tool/tests/__init_.py create mode 100644 transforms/tabular-thresholding-tool/tests/test_main.py create mode 100644 utils/filepattern-generator-plugin/Dockerfile create mode 100644 utils/filepattern-generator-plugin/README.md create mode 100644 utils/filepattern-generator-plugin/VERSION create mode 100755 utils/filepattern-generator-plugin/build-docker.sh create mode 100644 utils/filepattern-generator-plugin/bumpversion.cfg create mode 100644 utils/filepattern-generator-plugin/filepatterngenerator.cwl create mode 100644 utils/filepattern-generator-plugin/ict.yaml create mode 100755 utils/filepattern-generator-plugin/package-release.sh create mode 100644 utils/filepattern-generator-plugin/plugin.json create mode 100755 utils/filepattern-generator-plugin/run-plugin.sh create mode 100644 utils/filepattern-generator-plugin/src/main.py create mode 100644 
utils/filepattern-generator-plugin/src/requirements.txt create mode 100644 utils/filepattern-generator-plugin/tests/test_main.py create mode 100644 utils/filepattern-generator-plugin/tests/version_test.py create mode 100644 utils/polus-csv-collection-merger/Dockerfile create mode 100644 utils/polus-csv-collection-merger/README.md create mode 100644 utils/polus-csv-collection-merger/VERSION create mode 100644 utils/polus-csv-collection-merger/csvcollectionsmerger.cwl create mode 100644 utils/polus-csv-collection-merger/ict.yaml create mode 100644 utils/polus-csv-collection-merger/plugin.json create mode 100644 utils/polus-csv-collection-merger/script.sh create mode 100644 utils/polus-python-template/.bumpversion.cfg create mode 100644 utils/polus-python-template/.gitignore create mode 100644 utils/polus-python-template/CHANGELOG.md create mode 100644 utils/polus-python-template/README.md create mode 100644 utils/polus-python-template/VERSION create mode 100644 utils/polus-python-template/cookiecutter.json create mode 100644 utils/polus-python-template/hooks/post_gen_project.py create mode 100644 utils/polus-python-template/hooks/pre_gen_project.py create mode 100644 utils/polus-python-template/pyproject.toml create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/README.md create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/VERSION create mode 100755 utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh create mode 100644 
utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/pyproject.toml create mode 100755 utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__init__.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py create mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py create mode 100644 utils/polus-stitching-vector-merger-plugin/Dockerfile create mode 100644 utils/polus-stitching-vector-merger-plugin/README.md create mode 100644 utils/polus-stitching-vector-merger-plugin/VERSION create mode 100755 utils/polus-stitching-vector-merger-plugin/build-docker.sh create mode 100644 utils/polus-stitching-vector-merger-plugin/ict.yaml create mode 100644 utils/polus-stitching-vector-merger-plugin/main.py create mode 100644 utils/polus-stitching-vector-merger-plugin/mergestitchingvector.cwl create mode 100644 utils/polus-stitching-vector-merger-plugin/plugin.json create mode 100644 utils/rxiv-download-tool/.bumpversion.cfg create mode 100644 utils/rxiv-download-tool/.dockerignore create mode 100644 utils/rxiv-download-tool/.gitignore create mode 100644 utils/rxiv-download-tool/Dockerfile create mode 100644 utils/rxiv-download-tool/README.md create mode 100644 
utils/rxiv-download-tool/VERSION create mode 100644 utils/rxiv-download-tool/build-docker.sh create mode 100644 utils/rxiv-download-tool/downloadrxivtextdata.cwl create mode 100644 utils/rxiv-download-tool/examples/arXiv_20231218_0.json create mode 100644 utils/rxiv-download-tool/examples/arXiv_20231218_0.xml create mode 100644 utils/rxiv-download-tool/ict.yaml create mode 100644 utils/rxiv-download-tool/plugin.json create mode 100644 utils/rxiv-download-tool/pyproject.toml create mode 100644 utils/rxiv-download-tool/run-plugin.sh create mode 100644 utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__init__.py create mode 100644 utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__main__.py create mode 100644 utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py create mode 100644 utils/rxiv-download-tool/tests/__init__.py create mode 100644 utils/rxiv-download-tool/tests/conftest.py create mode 100644 utils/rxiv-download-tool/tests/test_cli.py create mode 100644 utils/rxiv-download-tool/tests/test_fetch.py create mode 100644 visualization/polus-graph-pyramid-builder-plugin/Dockerfile create mode 100644 visualization/polus-graph-pyramid-builder-plugin/README.md create mode 100644 visualization/polus-graph-pyramid-builder-plugin/VERSION create mode 100755 visualization/polus-graph-pyramid-builder-plugin/build-docker.sh create mode 100644 visualization/polus-graph-pyramid-builder-plugin/graphpyramidbuilding.cwl create mode 100644 visualization/polus-graph-pyramid-builder-plugin/ict.yaml create mode 100644 visualization/polus-graph-pyramid-builder-plugin/plugin.json create mode 100644 visualization/polus-graph-pyramid-builder-plugin/src/dl_fi.py create mode 100644 visualization/polus-graph-pyramid-builder-plugin/src/main.py create mode 100644 visualization/polus-graph-pyramid-builder-plugin/src/requirements.txt create mode 100644 visualization/tabular-to-microjson-tool/.bumpversion.cfg create mode 100644 
visualization/tabular-to-microjson-tool/Dockerfile create mode 100644 visualization/tabular-to-microjson-tool/README.md create mode 100644 visualization/tabular-to-microjson-tool/VERSION create mode 100644 visualization/tabular-to-microjson-tool/build-docker.sh create mode 100644 visualization/tabular-to-microjson-tool/examples/example_overlay_Point.json create mode 100644 visualization/tabular-to-microjson-tool/examples/example_overlay_Polygon.json create mode 100644 visualization/tabular-to-microjson-tool/ict.yaml create mode 100644 visualization/tabular-to-microjson-tool/plugin.json create mode 100644 visualization/tabular-to-microjson-tool/pyproject.toml create mode 100644 visualization/tabular-to-microjson-tool/run-plugin.sh create mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/__init__.py create mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/__main__.py create mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py create mode 100644 visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl create mode 100644 visualization/tabular-to-microjson-tool/tests/__init__.py create mode 100644 visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py diff --git a/.bumpversion.cfg b/.bumpversion.cfg new file mode 100644 index 0000000..dace2a7 --- /dev/null +++ b/.bumpversion.cfg @@ -0,0 +1,23 @@ +[bumpversion] +current_version = 0.1.1 +commit = False +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:VERSION] diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..6a1f564 --- /dev/null +++ b/.flake8 @@ -0,0 +1,4 @@ +[flake8] +ignore = W503, E501 +max-line-length = 88 +extended-ignore = E203 diff --git a/.gitignore b/.gitignore index e968fa7..a07072c 100644 --- a/.gitignore +++ b/.gitignore @@ -157,4 +157,23 @@ cython_debug/ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. -.idea/ +#.idea/ + +# vscode +.vscode + +# test data directory +data + +# local manifests +src/polus/plugins/_plugins/manifests/* + +# allow python scripts inside manifests dir +!src/polus/plugins/_plugins/manifests/*.py + +#macOS +*.DS_Store + + +#husky +node_modules diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 652e537..6e78c94 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,11 +1,11 @@ fail_fast: true repos: - - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: - id: check-added-large-files + exclude: (.*?)\.(h5)$ - id: check-case-conflict - id: check-json - id: pretty-format-json @@ -26,27 +26,41 @@ repos: args: ["--fix=lf"] description: Forces to replace line ending by the UNIX 'lf' character. 
- id: trailing-whitespace - exclude: '.bumpversion.cfg' + exclude: ".bumpversion.cfg" - id: check-merge-conflict - repo: https://github.com/psf/black - rev: '23.3.0' + rev: "23.3.0" hooks: - id: black language_version: python3.9 - exclude: ^src\/polus\/plugins\/_plugins\/models\/\w*Schema.py$ + exclude: | + (?x)( + ^src\/polus\/plugins\/_plugins\/models\/pydanticv1\/\w*Schema.py$| + ^src\/polus\/plugins\/_plugins\/models\/pydanticv2\/\w*Schema.py$ + ) - repo: https://github.com/charliermarsh/ruff-pre-commit # Ruff version. - rev: 'v0.0.274' + rev: "v0.0.274" hooks: - id: ruff - exclude: ^src\/polus\/plugins\/_plugins\/models\/\w*Schema.py$ + exclude: | + (?x)( + test_[a-zA-Z0-9]+.py$| + ^src\/polus\/plugins\/_plugins\/models\/pydanticv1\/\w*Schema.py$| + ^src\/polus\/plugins\/_plugins\/models\/pydanticv2\/\w*Schema.py$ + ) args: [--fix] - repo: https://github.com/pre-commit/mirrors-mypy - rev: 'v1.4.0' + rev: "v1.4.0" hooks: - id: mypy - exclude: ^src\/polus\/plugins\/_plugins\/models\/\w*Schema.py$ + exclude: | + (?x)( + test_[a-zA-Z0-9]+.py$| + ^src\/polus\/plugins\/_plugins\/models\/pydanticv1\/\w*Schema.py$| + ^src\/polus\/plugins\/_plugins\/models\/pydanticv2\/\w*Schema.py$ + ) additional_dependencies: [types-requests==2.31.0.1] diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 0000000..0192cba --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1 @@ +* @Nicholas-Schaub @NHotaling @hsidky diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000..0fa72d1 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,75 @@ +pipeline { + agent { + node { label 'linux && build && aws' } + } + environment { + PROJECT_URL = 'https://github.com/polusai/polus-plugins' + } + triggers { + pollSCM('H/5 * * * *') + } + stages { + stage('Build Version') { + steps{ + script { + BUILD_VERSION_GENERATED = VersionNumber( + versionNumberString: 'v${BUILD_YEAR, XX}.${BUILD_MONTH, XX}${BUILD_DAY, XX}.${BUILDS_TODAY}', + projectStartDate: '1970-01-01', + skipFailedBuilds: false) + 
currentBuild.displayName = BUILD_VERSION_GENERATED + env.BUILD_VERSION = BUILD_VERSION_GENERATED + } + } + } + stage('Checkout source code') { + steps { + cleanWs() + checkout scm + } + } + stage('Build Docker images') { + steps { + script { + configFileProvider([configFile(fileId: 'update-docker-description', targetLocation: 'update.sh')]) { + // List all directories, each directory contains a plugin + def pluginDirectories = """${sh ( + script: "ls -d */", + returnStdout: true + )}""" + // Iterate over each plugin directory + pluginDirectories.split().each { repo -> + // Truncate hanging "/" for each directory + def pluginName = repo.getAt(0..(repo.length() - 2)) + // Check if VERSION file for each plugin file has changed + def isChanged = "0" + + if (env.GIT_PREVIOUS_SUCCESSFUL_COMMIT) { + isChanged = """${sh ( + script: "git diff --name-only ${GIT_PREVIOUS_SUCCESSFUL_COMMIT} ${GIT_COMMIT} | grep ${pluginName}/VERSION", + returnStatus: true + )}""" + } + if (isChanged == "0" && pluginName != "utils") { + dir("${WORKSPACE}/${pluginName}") { + def dockerVersion = readFile(file: 'VERSION').trim() + docker.withRegistry('https://registry-1.docker.io/v2/', 'f16c74f9-0a60-4882-b6fd-bec3b0136b84') { + def image = docker.build("labshare/${pluginName}", '--no-cache ./') + image.push() + image.push(dockerVersion) + } + + env.PROJECT_NAME = "${pluginName}" + env.FULL_DESC = readFile(file: 'README.md') + env.BRIEF_DESC = "${PROJECT_URL}/tree/master/${PROJECT_NAME}" + } + withCredentials([usernamePassword(credentialsId: 'f16c74f9-0a60-4882-b6fd-bec3b0136b84', usernameVariable: 'DOCKER_USER', passwordVariable: 'DOCKER_PW')]) { + sh "sh ./update.sh" + } + } + } + } + } + } + } + } +} \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..e69de29 diff --git a/VERSION b/VERSION new file mode 100644 index 0000000..6e8bf73 --- /dev/null +++ b/VERSION @@ -0,0 +1 @@ +0.1.0 diff --git a/clustering/k-means-clustering-tool/.bumpversion.cfg 
b/clustering/k-means-clustering-tool/.bumpversion.cfg new file mode 100644 index 0000000..8664577 --- /dev/null +++ b/clustering/k-means-clustering-tool/.bumpversion.cfg @@ -0,0 +1,34 @@ +[bumpversion] +current_version = 0.3.5-dev1 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:README.md] + +[bumpversion:file:CHANGELOG.md] +[bumpversion:file:ict.yaml] +[bumpversion:file:k-meansclustering.cwl] + +[bumpversion:file:src/polus/tabular/clustering/k_means/__init__.py] + diff --git a/clustering/k-means-clustering-tool/CHANGELOG.md b/clustering/k-means-clustering-tool/CHANGELOG.md new file mode 100644 index 0000000..07e3b83 --- /dev/null +++ b/clustering/k-means-clustering-tool/CHANGELOG.md @@ -0,0 +1,13 @@ +# K-Means Clustering(0.3.5-dev1) + +1. This plugin is updated only to the new plugin standards +2. Before plugin support only `.csv` as an input files supported `.csv` and `.feather` file formats. Now this plugin support other vaex supported file formats both as inputs and outputs. +3. Some additional input arguments added `filePattern`, `fileExtension` +4. Implemented latest updated filepattern package +5. This plugin is now installable with pip. +6. Argparse package is replaced with Typer package for command line arguments. +7. `baseCommand` added in a plugin manifiest. +8. `--preview` flag is added which shows outputs to be generated by this plugin. +9. Use `python -m python -m polus.plugins.clustering.k_means` to run plugin from command line. +10. No unnitests before and new pytests added for testing. +11. 
Implemented parallel processing diff --git a/clustering/k-means-clustering-tool/Dockerfile b/clustering/k-means-clustering-tool/Dockerfile new file mode 100644 index 0000000..6ad3068 --- /dev/null +++ b/clustering/k-means-clustering-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".arrow" +ENV POLUS_LOG="INFO" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.tabular.clustering.k_means"] +CMD ["--help"] diff --git a/clustering/k-means-clustering-tool/README.md b/clustering/k-means-clustering-tool/README.md new file mode 100644 index 0000000..931c055 --- /dev/null +++ b/clustering/k-means-clustering-tool/README.md @@ -0,0 +1,65 @@ +# K-Means Clustering(v0.3.5-dev1) + +The K-Means Clustering plugin clusters the data using Scikit-learn K-Means clustering algorithm and outputs csv file. Each instance(row) in the input csv file is assigned to one of the clusters. The output csv file contains the column 'Cluster' that shows which cluster the instance belongs to. + +## Inputs: + +### Input data: +The input tabular data that need to be clustered. This plugin supports `.csv` and `.arrow` file formats + +### Methods: +Choose any one of the method mentioned to determine the k-value and cluster the data. + +#### Elbow method +The elbow method runs k-means clustering for a range of values of k and for each k value it calculates the within cluster sum of squared errors (WSS). The idea behind this method is that SSE tends to decrease towards 0 as k-value increases. The goal here is to choose a k-value that has low WSS and the elbow represents where there is diminishing returns by increasing k. 
+ +#### Calinski-Harabasz index +The Calinski-Harabasz index is defined as the ratio of the sum of between-cluster dispersion to the sum of within-cluster dispersion. To choose k, pick maximum number of clusters to be considered and then choose the value of k with the highest score. + +#### Davies-Bouldin index +The Davies-Bouldin index is defined as the average similarity measure of each cluster with its most similar one, where similarity is a ratio of within-cluster distances to between-cluster distances. To choose k value, pick maximum number of clusters to be considered and choose the value of k with lowest value for DB_index. + +### Manual +Select manual method only when you know the number of clusters required to cluster the data. + +### Minimum range: +Enter starting number of sequence in range function to determine k-value. This parameter is required only when elbow or Calinski Harabasz or Davies Bouldin methods are selected. + +### Maximum range: +Enter ending number of sequence in range function to determine k-value. This parameter is required only when elbow or Calinski Harabasz or Davies Bouldin methods are selected. + +### Number of clusters: +Enter k-value if you already know how many clusters are required. This parameter is required only when manual method is selected. + +## Note: +1. If 'Manual' method is selected, enter number of clusters required. +2. If 'Elbow' or 'CalinskiHarabasz' or 'DaviesBouldin' methods are selected, then you should enter values for both 'maximumrange' and 'minimumrange'. +3. The 'minimumrange'value should be >1. + +## Output: +The output is a tabular file containing the cluster data to which each instance in the data belongs to. + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. 
+For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Options + +This plugin takes seven input arguments and one output argument: + +| Name | Description | I/O | Type | +| ---------------- | --------------------------------------------------------------------------- | ------ | ------------- | +| `--inpdir` | Input tabular data | Input | genericData | +| `--filePattern` | Pattern to parse tabular files | Input | string | +| `--methods` | Select either Elbow or Calinski Harabasz or Davies Bouldin or Manual method | Input | enum | +| `--minimumrange` | Enter minimum k-value | Input | integer | +| `--maximumrange` | Enter maximum k-value | Input | integer | +| `--numofclus` | Enter number of clusters | Input | integer | +| `--outdir` | Output collection | Output | genericData | +| `--preview` | Generate JSON file with outputs | Output | JSON | diff --git a/clustering/k-means-clustering-tool/VERSION b/clustering/k-means-clustering-tool/VERSION new file mode 100644 index 0000000..65543cf --- /dev/null +++ b/clustering/k-means-clustering-tool/VERSION @@ -0,0 +1 @@ +0.3.5-dev1 diff --git a/clustering/k-means-clustering-tool/build-docker.sh b/clustering/k-means-clustering-tool/build-docker.sh new file mode 100644 index 0000000..275ef59 --- /dev/null +++ b/clustering/k-means-clustering-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(", +"Kelechi Nina Mezu ", +"hamshkhawar " +] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = "^3.9" +filepattern = "^2.0.0" +typer = "^0.7.0" +nyxus = "^0.5.0" +vaex = "^4.7.0" +scikit_learn="^1.0.2" + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.0.4" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.0" +pytest = "^7.2.1" +ipykernel = "^6.21.2" +requests = "^2.28.2" +pandas = "^2.0.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" 
diff --git a/clustering/k-means-clustering-tool/run-plugin.sh b/clustering/k-means-clustering-tool/run-plugin.sh new file mode 100644 index 0000000..18c8bea --- /dev/null +++ b/clustering/k-means-clustering-tool/run-plugin.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +version=$( None: + """K-means clustering plugin.""" + logger.info(f"inpDir = {inp_dir}") + logger.info(f"filePattern = {file_pattern}") + logger.info(f"minimumRange = {minimum_range}") + logger.info(f"maximumRange = {maximum_range}") + logger.info(f"numOfClus = {num_of_clus}") + logger.info(f"outDir = {out_dir}") + + assert inp_dir.exists(), f"{inp_dir} doesnot exist!! Please check input path again" + assert out_dir.exists(), f"{out_dir} doesnot exist!! Please check output path again" + assert file_pattern in [ + ".csv", + ".arrow", + ], f"{file_pattern} tabular files are not supported by this plugin" + + num_threads = max([cpu_count(), 2]) + + pattern = ".*" + file_pattern + fps = fp.FilePattern(inp_dir, pattern) + print(pattern) + + if not fps: + msg = f"No {file_pattern} files found." 
+ raise ValueError(msg) + + if preview: + with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": pattern, + "outDir": [], + } + for file in fps(): + out_name = str(file[1][0].stem) + POLUS_TAB_EXT + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + + flist = [f[1][0] for f in fps()] + + with multiprocessing.Pool(processes=num_threads) as executor: + executor.map( + partial( + km.clustering, + file_pattern=pattern, + methods=methods, + minimum_range=minimum_range, + maximum_range=maximum_range, + num_of_clus=num_of_clus, + out_dir=out_dir, + ), + flist, + ) + executor.close() + executor.join() + + +if __name__ == "__main__": + app() diff --git a/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/k_means.py b/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/k_means.py new file mode 100644 index 0000000..ce2ad64 --- /dev/null +++ b/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/k_means.py @@ -0,0 +1,215 @@ +"""K_means clustering.""" +import logging +import os +import pathlib + +import numpy +import numpy as np +import numpy.matlib +import vaex +from sklearn.cluster import KMeans +from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score + +from .utils import Methods + +# Initialize the logger +logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", +) +logger = logging.getLogger("main") +logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO)) +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow") + + +def elbow(data_array: np.array, minimum_range: int, maximum_range: int) -> np.array: + """Determine k value and cluster data using elbow method. + + Args: + data_array : Input data. + minimum_range : Starting number of sequence in range function to determine k-value. 
+ maximum_range : Ending number of sequence in range function to determine k-value. + + Returns: + Labeled data. + """ + sse = [] + label_value = [] + logger.info("Starting Elbow Method...") + K = range(minimum_range, maximum_range + 1) + for k in K: + kmeans = KMeans(n_clusters=k, random_state=9).fit(data_array) + centroids = kmeans.cluster_centers_ + pred_clusters = kmeans.predict(data_array) + curr_sse = 0 + + # calculate square of Euclidean distance of each point from its cluster center and add to current WSS + logger.info("Calculating Euclidean distance...") + for i in range(len(data_array)): + curr_center = centroids[pred_clusters[i]] + curr_sse += np.linalg.norm(data_array[i] - np.array(curr_center)) ** 2 + sse.append(curr_sse) + labels = kmeans.labels_ + label_value.append(labels) + + logger.info("Finding elbow point in curve...") + # Find the elbow point in the curve + points = len(sse) + # Get coordinates of all points + coord = np.vstack((range(points), sse)).T + # First point + f_point = coord[0] + # Vector between first and last point + linevec = coord[-1] - f_point + # Normalize the line vector + linevecn = linevec / np.sqrt(np.sum(linevec**2)) + # Vector between all point and first point + vecf = coord - f_point + # Parallel vector + prod = np.sum(vecf * numpy.matlib.repmat(linevecn, points, 1), axis=1) + vecfpara = np.outer(prod, linevecn) + # Perpendicular vector + vecline = vecf - vecfpara + # Distance from curve to line + dist = np.sqrt(np.sum(vecline**2, axis=1)) + # Maximum distance point + k_cluster = np.argmax(dist) + minimum_range + logger.info("k cluster: %s", k_cluster) + logger.info("label value: %s", label_value) + logger.info("Setting label_data") + label_data = label_value[k_cluster] + return label_data + + +def calinski_davies( + data_array: np.array, methods: Methods, minimum_range: int, maximum_range: int +) -> np.array: + """Determine k value and cluster data using Calinski Harabasz Index method or Davies Bouldin based on method 
selection. + + Args: + data: Input data. + methods: Select either Calinski Harabasz or Davies Bouldin method. + minimum_range: Starting number of sequence in range function to determine k-value. + maximum_range:Ending number of sequence in range function to determine k-value. + + Returns: + Labeled data. + """ + K = range(minimum_range, maximum_range + 1) + chdb = [] + label_value = [] + for k in K: + kmeans = KMeans(n_clusters=k, random_state=9).fit(data_array) + labels = kmeans.labels_ + label_value.append(labels) + if f"{methods}" == "CalinskiHarabasz": + ch_db = calinski_harabasz_score(data_array, labels) + else: + ch_db = davies_bouldin_score(data_array, labels) + chdb.append(ch_db) + if f"{methods}" == "CalinskiHarabasz": + score = max(chdb) + else: + score = min(chdb) + k_cluster = chdb.index(score) + label_data = label_value[k_cluster] + return label_data + + +def clustering( + file: pathlib.Path, + file_pattern: str, + methods: Methods, + minimum_range: int, + maximum_range: int, + num_of_clus: int, + out_dir: pathlib.Path, +): + """K-means clustering methods to find clusters of similar or more related objects. + + Args: + file: Input path. + file_pattern: Pattern to parse tabular files. + methods: Select either Calinski Harabasz or Davies Bouldin method or Manual. + minimum_range: Starting number of sequence in range function to determine k-value. + maximum_range:Ending number of sequence in range function to determine k-value. 
+ """ + # Get file name + filename = file.stem + logger.info("Started reading the file " + file.name) + with open(file, encoding="utf-8", errors="ignore") as fr: + ncols = len(fr.readline().split(",")) + chunk_size = max([2**24 // ncols, 1]) + + if f"{file_pattern}" == ".csv": + df = vaex.read_csv(file, convert=True, chunk_size=chunk_size) + else: + df = vaex.open(file) + # Get list of column names + cols = df.get_column_names() + + # Separate data by categorical and numerical data types + numerical = [] + categorical = [] + for col in cols: + if df[col].dtype == str: + categorical.append(col) + else: + numerical.append(col) + # Remove label field + if "label" in numerical: + numerical.remove("label") + + if numerical is None: + raise ValueError("There are no numerical features in the data.") + else: + data = df[numerical] + + if categorical: + cat_array = df[categorical] + else: + logger.info("No categorical features found in the data") + + if f"{methods}" != "Manual": + # Check whether minimum range and maximum range value is entered + if methods and not (minimum_range or maximum_range): + raise ValueError( + "Enter both minimumrange and maximumrange to determine k-value." + ) + if minimum_range <= 1: + raise ValueError("Minimumrange should be greater than 1.") + logger.info( + "Determining k-value using " + methods + " and clustering the data." 
+ ) + if f"{methods}" == "CalinskiHarabasz": + label_data = calinski_davies(data, methods, minimum_range, maximum_range) + if f"{methods}" == "DaviesBouldin": + label_data = calinski_davies(data, methods, minimum_range, maximum_range) + if f"{methods}" == "Elbow": + label_data = elbow(data, minimum_range, maximum_range) + else: + # Check whether numofclus is entered + if not num_of_clus: + raise ValueError("Enter number of clusters") + kvalue = num_of_clus + kmeans = KMeans(n_clusters=kvalue).fit(data) + label_data = kmeans.labels_ + + # Cluster data using K-Means clustering + logger.info("Adding Cluster Data") + data["Cluster"] = label_data + + # Add Categorical Data back to data processed + if categorical: + logger.info("Adding categorical data") + for col in categorical: + data[col] = cat_array[col].values + + # Save dataframe to feather file or to csv file + out_file = pathlib.Path(out_dir, (filename + POLUS_TAB_EXT)) + + if f"{POLUS_TAB_EXT}" in [".feather", ".arrow"]: + data.export_feather(out_file) + else: + logger.info("Saving csv file") + data.export_csv(out_file, chunk_size=chunk_size) diff --git a/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/utils.py b/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/utils.py new file mode 100644 index 0000000..91bb81b --- /dev/null +++ b/clustering/k-means-clustering-tool/src/polus/tabular/clustering/k_means/utils.py @@ -0,0 +1,12 @@ +"""K_means clustering.""" +import enum + + +class Methods(str, enum.Enum): + """Clustering methods to determine k-value.""" + + ELBOW = "Elbow" + CALINSKIHARABASZ = "CalinskiHarabasz" + DAVIESBOULDIN = "DaviesBouldin" + MANUAL = "Manual" + Default = "Elbow" diff --git a/clustering/k-means-clustering-tool/tests/__init__.py b/clustering/k-means-clustering-tool/tests/__init__.py new file mode 100644 index 0000000..36f89f9 --- /dev/null +++ b/clustering/k-means-clustering-tool/tests/__init__.py @@ -0,0 +1 @@ +"""K_means clustering.""" diff 
--git a/clustering/k-means-clustering-tool/tests/conftest.py b/clustering/k-means-clustering-tool/tests/conftest.py new file mode 100644 index 0000000..58dce0f --- /dev/null +++ b/clustering/k-means-clustering-tool/tests/conftest.py @@ -0,0 +1,91 @@ +"""Test Fixtures.""" + +import pathlib +import shutil +import tempfile + +import numpy as np +import pandas as pd +import pytest + + +class Generatedata: + """Generate tabular data with several different file format.""" + + def __init__(self, file_pattern: str, size: int, outname: str) -> None: + """Define instance attributes.""" + self.dirpath = pathlib.Path.cwd() + self.inp_dir = tempfile.mkdtemp(dir=self.dirpath) + self.out_dir = tempfile.mkdtemp(dir=self.dirpath) + self.file_pattern = file_pattern + self.size = size + self.outname = outname + self.x = self.create_dataframe() + + def get_inp_dir(self) -> pathlib.Path: + """Get input directory.""" + return pathlib.Path(self.inp_dir) + + def get_out_dir(self) -> pathlib.Path: + """Get output directory.""" + return pathlib.Path(self.out_dir) + + def create_dataframe(self) -> pd.core.frame.DataFrame: + """Create Pandas dataframe.""" + rng = np.random.default_rng() + diction_1 = { + "A": np.linspace(0.0, 4.0, self.size, dtype="float32", endpoint=False), + "B": np.linspace(0.0, 6.0, self.size, dtype="float32", endpoint=False), + "C": np.linspace(0.0, 8.0, self.size, dtype="float32", endpoint=False), + "D": np.linspace(0.0, 10.0, self.size, dtype="float32", endpoint=False), + "label": rng.integers(low=1, high=4, size=self.size), + } + + return pd.DataFrame(diction_1) + + def csv_func(self) -> None: + """Convert pandas dataframe to csv file format.""" + self.x.to_csv(pathlib.Path(self.inp_dir, self.outname), index=False) + + def arrow_func(self) -> None: + """Convert pandas dataframe to Arrow file format.""" + self.x.to_feather(pathlib.Path(self.inp_dir, self.outname)) + + def __call__(self) -> None: + """To make a class callable.""" + data_ext = { + ".csv": self.csv_func, 
+ ".arrow": self.arrow_func, + } + + return data_ext[self.file_pattern]() + + def clean_directories(self) -> None: + """Remove files.""" + for d in self.dirpath.iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +def pytest_addoption(parser: pytest.Parser) -> None: + """Add options to pytest.""" + parser.addoption( + "--slow", + action="store_true", + dest="slow", + default=False, + help="run slow tests", + ) + + +@pytest.fixture( + params=[ + ("CalinskiHarabasz", 500, ".csv", 2, 5), + ("DaviesBouldin", 250, ".arrow", 2, 7), + ("Elbow", 500, ".arrow", 2, 10), + ("Manual", 200, ".arrow", 2, 5), + ], +) +def get_params(request: pytest.FixtureRequest) -> pytest.FixtureRequest: + """To get the parameter of the fixture.""" + return request.param diff --git a/clustering/k-means-clustering-tool/tests/test_main.py b/clustering/k-means-clustering-tool/tests/test_main.py new file mode 100644 index 0000000..922a7ac --- /dev/null +++ b/clustering/k-means-clustering-tool/tests/test_main.py @@ -0,0 +1,142 @@ +"""K_means clustering.""" + +import shutil + +import filepattern as fp +import pytest +import vaex +from polus.tabular.clustering.k_means import k_means as km +from polus.tabular.clustering.k_means.__main__ import app +from typer.testing import CliRunner + +from .conftest import Generatedata + +runner = CliRunner() + + +@pytest.mark.parametrize( + ("ext", "minrange", "maxrange"), + [(".arrow", 2, 5), (".csv", 2, 7)], +) +@pytest.mark.skipif("not config.getoption('slow')") +def test_elbow(ext: str, minrange: int, maxrange: int) -> None: + """Testing elbow function.""" + d = Generatedata(ext, outname=f"data_1{ext}", size=10000) + d() + pattern = f".*{ext}" + fps = fp.FilePattern(d.get_inp_dir(), pattern) + + for file in fps(): + if f"{pattern}" == ".csv": + df = vaex.read_csv(file[1][0], convert=True) + else: + df = vaex.open(file[1][0]) + + label_data = km.elbow( + data_array=df[:, :4].values, + minimum_range=minrange, + maximum_range=maxrange, + 
) + + assert label_data is not None + + d.clean_directories() + + +@pytest.mark.parametrize( + ("method", "datasize", "ext", "minrange", "maxrange"), + [ + ("CalinskiHarabasz", 500, ".arrow", 2, 5), + ("DaviesBouldin", 600, ".csv", 2, 7), + ], +) +@pytest.mark.skipif("not config.getoption('slow')") +def test_calinski_davies( + method: str, + datasize: int, + ext: str, + minrange: int, + maxrange: int, +) -> None: + """Testing calinski_davies and davies_bouldin methods.""" + d = Generatedata(ext, outname=f"data_1{ext}", size=datasize) + d() + pattern = f".*{ext}" + fps = fp.FilePattern(d.get_inp_dir(), pattern) + + for file in fps(): + if f"{pattern}" == ".csv": + df = vaex.read_csv(file[1][0], convert=True) + else: + df = vaex.open(file[1][0]) + + label_data = km.calinski_davies( + data_array=df[:, :4].values, + methods=method, + minimum_range=minrange, + maximum_range=maxrange, + ) + + assert label_data is not None + + d.clean_directories() + + +@pytest.mark.skipif("not config.getoption('slow')") +def test_clustering(get_params: pytest.FixtureRequest) -> None: + """Test clustering function.""" + method, datasize, ext, minrange, maxrange = get_params + d = Generatedata(ext, outname=f"data_1{ext}", size=datasize) + d() + pattern = f".*{ext}" + numclusters = 3 + fps = fp.FilePattern(d.get_inp_dir(), pattern) + for file in fps(): + km.clustering( + file=file[1][0], + file_pattern=ext, + methods=method, + minimum_range=minrange, + maximum_range=maxrange, + num_of_clus=numclusters, + out_dir=d.get_out_dir(), + ) + assert d.get_out_dir().joinpath("data_1.arrow") + df = vaex.open(d.get_out_dir().joinpath("data_1.arrow")) + assert "Cluster" in df.columns + d.clean_directories() + + +def test_cli(get_params: pytest.FixtureRequest) -> None: + """Test Cli.""" + method, data_size, inpext, minrange, maxrange = get_params + d = Generatedata(inpext, outname=f"data_1{inpext}", size=data_size) + d() + shutil.copy( + d.get_inp_dir().joinpath(f"data_1{inpext}"), + 
d.get_inp_dir().joinpath(f"data_2{inpext}"), + ) + numclusters = 3 + + result = runner.invoke( + app, + [ + "--inpDir", + d.get_inp_dir(), + "--filePattern", + inpext, + "--methods", + method, + "--minimumRange", + minrange, + "--maximumRange", + maxrange, + "--numOfClus", + numclusters, + "--outDir", + d.get_out_dir(), + ], + ) + assert result.exit_code == 0 + + d.clean_directories() diff --git a/clustering/outlier-removal-tool/.bumpversion.cfg b/clustering/outlier-removal-tool/.bumpversion.cfg new file mode 100644 index 0000000..72c49c4 --- /dev/null +++ b/clustering/outlier-removal-tool/.bumpversion.cfg @@ -0,0 +1,35 @@ +[bumpversion] +current_version = 0.2.7-dev1 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:VERSION] + +[bumpversion:file:README.md] + +[bumpversion:file:plugin.json] + +[bumpversion:file:CHANGELOG.md] + +[bumpversion:file:ict.yaml] + +[bumpversion:file:outlierremoval.cwl] + +[bumpversion:file:src/polus/tabular/clustering/outlier_removal/__init__.py] diff --git a/clustering/outlier-removal-tool/.dockerignore b/clustering/outlier-removal-tool/.dockerignore new file mode 100644 index 0000000..7c603f8 --- /dev/null +++ b/clustering/outlier-removal-tool/.dockerignore @@ -0,0 +1,4 @@ +.venv +out +tests +__pycache__ diff --git a/clustering/outlier-removal-tool/.gitignore b/clustering/outlier-removal-tool/.gitignore new file mode 100644 index 0000000..9ed1c37 --- /dev/null +++ b/clustering/outlier-removal-tool/.gitignore @@ -0,0 +1,23 @@ +# Jupyter Notebook +.ipynb_checkpoints +poetry.lock +../../poetry.lock +# Environments +.env +.myenv +.venv +env/ +venv/ +# test data directory +data +# 
yaml file +.pre-commit-config.yaml +# hidden files +.DS_Store +.ds_store +# flake8 +.flake8 +../../.flake8 +__pycache__ +.mypy_cache +requirements.txt diff --git a/clustering/outlier-removal-tool/CHANGELOG.md b/clustering/outlier-removal-tool/CHANGELOG.md new file mode 100644 index 0000000..09a0c7f --- /dev/null +++ b/clustering/outlier-removal-tool/CHANGELOG.md @@ -0,0 +1,15 @@ +# [0.2.7-dev1] - 2024-01-12 + +## Added + +- Pytests to test this plugin +- This plugin is now installable with pip. +- Added support for arrow file format in addition to csv + +## Changed + +- Updated dependencies (bfio, filepattern, preadator) to latest +- Argparse package is replaced with Typer package for command line arguments +- Replaced docker base image with latest container image with pre-installed bfio +- Replaced pandas with vaex +- Seperating descriptive from numerical features for outlier detection if present in the tabular data diff --git a/clustering/outlier-removal-tool/Dockerfile b/clustering/outlier-removal-tool/Dockerfile new file mode 100644 index 0000000..3889076 --- /dev/null +++ b/clustering/outlier-removal-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.3.6 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".arrow" + + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} + +RUN pip3 install ${EXEC_DIR} --no-cache + + +ENTRYPOINT ["python3", "-m", "polus.tabular.clustering.outlier_removal"] +CMD ["--help"] diff --git a/clustering/outlier-removal-tool/README.md b/clustering/outlier-removal-tool/README.md new file mode 100644 index 0000000..3a97455 --- /dev/null +++ b/clustering/outlier-removal-tool/README.md @@ -0,0 +1,52 @@ +# Outlier removal (v0.2.7-dev1) + +The outlier removal plugin removes the outliers from the data based on the method selected and outputs csv file. 
The output will have separate csv files for inliers and outliers. The input file should be in csv format. + +The plugin support vaex supported input csv file that need outliers to be removed. The file should be in csv format. This is a required parameter for the plugin. + +## Methods + +Choose any one of the methods mentioned to remove outliers from the data. + +### Isolation Forest + +Ensemble-based unsupervised method for outlier detection. The algorithm isolates outliers instead of normal instances. It works based on the principle that outliers are few and different and hence, the outliers can be identified easier than the normal points. The score is calculated as the path length to isolate the observation. These two methods can be selected to detect outliers> + +1. `IsolationForest` Detect outliers globally that deviates significantly from the rest of the datapoints +2. `IForest` Detect local outliers that are distinct when compared to those of its neighbors. + + +### Global + + + +### Local + + + +## Outputs: + +Select the output file by passing value to `outputType`. User can select from following options `inlier`, `oulier` or `combined`. The combined file contains `anomaly` column which score each datapoint if it is inlier or outlier. + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh` + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). 
+ +## Options + +This plugin takes three input arguments and one output argument: + +| Name | Description | I/O | Type | +| ----------- | ------------------------------------- | ------ | ------------- | +| `--inpDir` | Input directory containing tabular files | Input | genericData | +| `--filePattern` | Pattern to parse tabular file names | Input | string | +| `--methods` | Select methods for outlier removal | Input | enum | +| `--outputType` | Select type of output file | Input | enum | +| `--outdir` | Output collection | Output | genericData | +| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/clustering/outlier-removal-tool/VERSION b/clustering/outlier-removal-tool/VERSION new file mode 100644 index 0000000..7d24d15 --- /dev/null +++ b/clustering/outlier-removal-tool/VERSION @@ -0,0 +1 @@ +0.2.7-dev1 diff --git a/clustering/outlier-removal-tool/build-docker.sh b/clustering/outlier-removal-tool/build-docker.sh new file mode 100644 index 0000000..be64f72 --- /dev/null +++ b/clustering/outlier-removal-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(v3C|>akiX09m1?Ihb_iz;Cr8Vx|djPq24`l=W zA@EL|)#w56KaOwoYfG3@2_9Pizu4nh9AAH8<) z`rf_U90h3!EjOd>lt*r$7o@=nQK0*jU~pz@D2@~X{{5^cU=vbQnH*g;cqsgRI==oB zlZ|FqX*fEHf8ZkRv(08Y>dl?Y^~Od%mZTZ$lvi=d3G2e-Z{Fn1e_jw+N_O0>mcU2- z`=XV`_XDH*KS91Ed!L}l{QDp<9KoWwzIq2 zZ14*YzsE6z&A26klFyd^lYY-krNzOp*saO+#RqiPy;ilZ7A`*9YZzt zIcNAa@Hqg{6V04A`wpTqTVoTx*K&!gnX9DU9u8)xN0f*_ReY{bbdI-Yhyotrb#Kp9 zew0Tp>xak4WtS%~X!dx~Y`E0wE5>Ux8~Bk~$L#wD2HiS)S`v1nh}Zp}OeYIpOxMBa zPJs)kLab*h&{71Q+t<^4OOibV>%E8`LpV7((ed!)fUj|S99t3({+#nET0U>Ny}8WS zEeCu2*^Qhm(m_lVXh!8^^!VOh=Vd+yX?ydhX`6JKAFvc9b89LbO>w;TmCXxJ_` z6^OE`nZlNV`jDuPNXQkJU;zdy9CH_SbmFetFC-Qiz6hIz6k> z!o|f_>BkuBr52G~)im2LW(it5Zy)Yavf85o`}UI-H~@jKaJs%XxgU*ueaW{ruaQ_~ zIf`<9`KKpaAx2`hL%)pYFmGwfAlm$?5YOQ%}$(Xq0Z0;53#?663}Vc87^Xi zen@ewg&u7vY2LGcdDVaG_)>}6oHfFc-EcO8O}`3qak>+})}O#Uo@TLXarA4D-)UpS zCW^PV&0Ng=Fq_GK{d}LhI4=W6xt$TN{)D9Uh$@BpwJ#dnR>wB3`Uxek^*1;%^F%qD 
zVS^TR&*{z_B1a8&(Hl!QU2R2LW4ll&+Rr<+?=>9^il2Wt7tzzs$YG119 zcAj3*hPis8BMzxPFIpoSx|pC5-8^lo4Q@{y?rsaa&F~h zXQ}k~)dg8?v+aW5WDBSzdH4uF7C@W-tq+mHqj(jt%=dow)?yFWO#TdnN?vUL38ig` z>UtbFK;;>=CCMABp0POb<@JpLTyieyosc0hD}#3v`F`Dzl*|Vbd6xs9x%CoObU zkGd)CAQmlpm2ZA~xo?+5RkxzZ)H2IFlraF-SG`D^)wy`D`A_O^k#PSQnGF#Ag`0qg z<1Z!R3^*LF;Y@FBj?;zFMM9@%?0xbP{z)x=Kl%0yfk!;aWYomJ+j{FGd^&9o(JoFT zWPa?mf8Xa~lko}|m11@s5hT^aG;1V%#8fL1>eFQ5rUIOPw%?OQc_r6Z=SK((nFUIU zQgW>Q%j3-eyVh%MYMqZvFZKudmLRd#~fWhq$Oc1UC#q{6jBaEsNbA-YO^YzE}yi`{SNW;fsbz#|UDKFjBj>Stan} zK#lC+Hp1R%b4&(<*SjfVRgL9<+hFJ!t8DNy!FDd&dH#5&7m9|J^^WUIqXgJ~E4vqO zP5XK=(XeY}BqUASYu4Ql^vie)09avS7}0avbXhG|pLk|Htrn&T=aJpMODm|4k?2DI z{FoBfGU~Hizphqet+%Fi#M-R)rz+Kc{pZ-Gg(4BPl`Ja+vu-gEgGj6MeRD7;*Wrvs zTETEdP0&!8tkA@tMbA=)zUr7|3joVp*FfW&c|-$_8RNs2I4SQ0mz2aZ9aSu@0I=6m zX^zqwQUA40Mv$(S^`P`XC!OPW8v@v_m`o{AuQU6BFFen4Ow~~Dek3 z7x2AlA~f^Mb*pdmG3Xw4xSVI36XfF3a8?+A;ICIMkV97l%Hz;5aZ@hx=3)uTxL)_A ztE31r$;3?gr>&nXi!iQ8-X8piO3zZmv=;*^?|1JnFXwQ>Q=Bt&&O+ za~*Tc|7DYrQp+A)Y2tQpghjDe5k~9rM1YA3gBs;DT9=Z270p1J)0&uGem9WvtUd zoLvlE#AsJ>xJ0wVDE2B4^uI~#jCJu?nxVrQD&kCd0Kfn3)apoYX2?#P! 
zAEszxQVQM(}Cwjw`eww z9R%pGi>;P@>O=&O=h>;P1!tA?%o+%`%r;i7yoe#ze7kWg@Hs?12Sw*?rCajlL{HIN zKudEpEX&5UrDcwG=f8NiMv9lS*i+cLc*ig71%n>9=^Ml8&SF2f$KY zb5>-?^{l&B*aj3RIQ@n`g^<)4Ad1A7-A6WWwn|&Iz7WranYCA2CW8YA9tvJWT#Rhy znP9!}Yd??hDfhNiS~K$?og0~fH0Wx!%Tr~sM%0xzI|(_YfjXLoxFnl_gjW4GBr`Re zHsUL=jO{d=bQ>ieCCffai-Jj_1R0K1bO*lGqRL3!GypxO*bTpYHH#-hNUaAalcgX# z8NuZlAK5^y6b(A!iBKtH`Zk5iUz#7H|t3k`EFnn%z&1S|v3 zKg~=xXM8Z!ee_!H;XIq?3k1({_2^FT=`VpT3R-}_wh8`C%cigVh8R=qjNz-BCR`d4 zA)2l@RB#lT=+?Fs*W6&v>(G_J%XF!)Xo}12K?l8b(fMn_e9C56pcKU}uw!a~%8PL| z=`!6E9_DQrD-;zoh(!keS={vZV>$>(2c#c?DIlkI=DyyGq|YYQcU0}uex$Ib&BnI` zW}Tre+hA{E$YQpeI{tamN>@ILXW+_9&vg*H;FZ&_nQJy`pC5^3e5|=9uqND;l`enh zR3S0SP;r>@ff;5pJJiU7itvYrT2&cuaMuY1_4iEqEM0CQh=8f^ua{1C8cp-W-yL5H;)HLVY0wYLqht{p?CLv}(BWOUk<~Asmy@Loac5D@^QBnA zxoMc`pnjMjYK#@gN%5qqCs$#IjUefFZj@)1BTt;!gpCa8RcSj8boo=U{`6b5^u`lQ($DO>bo??khat&sN>shEQwF5pw~l_nxR~S| zTkP&~0lnzoHMJa-=!xHyby|hu@+Z6CYi` zb|lyI}0lCmE_Dla39^>D*|_adIP=kliw)5tTF_1<5+Wp z!$UGJ%`C{=vFtSPa;eNTKoC!`7lf;1Xr@m{eIt&BX_~}^(sg8!&}5Azo0Hr~{_FhN zy9xQ7=&eoL;Q9yx^eFJ~>6*i7MAx|z5u*G^kW-n}Zn5bA74uKq#>3LbjRJ2P$ZEIX zr6@8}Oq?R&Z|$2@AwG3Br!n2vNBRf+pK-ep*-ys7GRR6oa^)|I;Gm%{4UBXCSm@+p zg?H^cNq_c`ArQAnZ1>;VJm5%d&u)wZaB_^(V&eUl!vKNcv`4yFt{^O3784D3LLNcs z;rf8*kbP-IGjP9>DnAwxF>*h5KjVRt#TONxMz-Cl(lo*cAb>YSPz!0F724CDpp(eZ0PT05N<-A2*J6aCJq2>DTk4;*W?5zd7uC~TfBI_b~7+Ce8? 
z*`L$7+X|6$2mvl|_wd2=|8ui{>?`xhU{dQvF=kR*VG_%-Z0J@bCY=z17#dOohH|4` zaiD!q2`CEhA=mq9LAH~gIMVg|s^4ZjHqU%dU-~QcyHorwgju}wpKck()ShmHSs{oH z|6)fZ#YeCQ1QiU5jK$cE#^oNCicV=?*)zcar83W%jb$qovQ}>(U=B<3cz*_C7wg4j zY22C`QtFIdn8XQsH*h0&G;3`a>InYmP3$qLxh+~V%j)SuhS3wELd$L3gh6yIm>}CU zvjEBXQEu6sma`QsXQS=-_v>A)^Ni7vmlh45k%e_&wiI%cp(uipPBiDasdh_HlnvdZ zND*o)!R%Ru+!&K=|8`lS-iF>JyK5-sne0Nn{L4Jd@*97<^UaYljJzX7Si1*_;k~{2 zt&eOszmhyY zz5<8r#56uIGr*=-pT@gIsn+Z`nEGLvQ6MuAi{|iD2 z);U1fCf=JqQ$g|%k)5q^o_4myu=ooY;U6N+z<_E~4;$^LEiaD}) zy?gEA`Wk4GV~&_399d;+f_92Is96puFn_Shs(!a+2A% z_xE+nO#BF;AR1COgKvNeF;Z!v3S=yF=id|R-J3?u9tE!rLbfi#?+^(;Kr&l_=!x)^ z3qqKU2&p+GgJ{*rh%@$et1JnSbwnpd(094V-hlx7Ex7dsw4IIdJUoEBJ*E=#p#p-b z3s8*?c50V0Jx+e->s97*srdd`a6P;JvsCW5u5txPC{fL4QGahHA*Yg=+v-RUE0WopV=6D`6;#gJeI?#5Z!L%zgi;)i=;r?9d3NPdh8vO{UeMQG#fuTP}bVqhP z_Pxpo{LZ)J6|k{kIgl(sBo{>mtmN)OW6^7%-qMaG@g+cNtN7rup}+!zlJ-u3;Y<-D z7ZV#X#ebd^zRGUc5O}mc%nZxKny-%t+AEeS&?%*njiL%O?FcTIV9)haPUN^hS8JzJ zVDL=EktU)1Q&8Fo>7C$4>wmX+uL2INk{vSmO)p42*UfM+(J(a|gK??heIJ<-3$9Bq zpr#QmYCkvRvDaaGkrX^yUlJLe;4`Tro?0-GE_9pYjnRDVuU`uNOz*cMkwW_Jpvv=- zwRS^EF+<`QRSO%4NWN*>42c!_bX2JQwQ#_?p7{iG*OVaChIXiecdWvK9MHw-|W<{uj@1E^C4Z5IYeR+S%9xJQUw%tyH8>hQVO zK^G|*&pXhMQsdH3)xxr1EU5abOP8*6Ol zf)|@TizLWeZ*M$#xUK-9F`hr;p6RmCmb(87`aRsgdlMX$T{hJD|M+%0Td)8w=A(D3 z{7)m2)sj2O@E-rjvYdA2XvF_y8w_ZV#mmPz%wFp%T@ZEDVu1a`jUzIo5LV&F;vMY9 z%$lTNXx6Vte;*FMUNuWz*EK5Bk$9Fe38FKTGwD7h*KP2H^3u$OKCr<1xiT3Z^%Yos z^nx%@+=@(WyAw;U1FJ!u8csyMW}|^-({$2Q&hvx`W0I~)e4n+xkA3`u?gp~3_SwIP zA^DCjRXpR#`DhslF2c#Gg9^;x*|V*f9Z#l<@;dlan)@1pcKoMoK!9bV{+HS{C`BtGq804O)S@NR< zA0Kj4m@F>_@q3;Z6C$DhPT;>I3AEJ&rAel6{B^LqP4aqxIIKGFLNq88jUchcnc~;> z7a~%w4i6g-B@k z{oiPq1wUj41vL6m2H2ETGbufIS>sDybJd8p&X(p#l>!_L{J#G!jkXiL3|3HGC_F45 zJ-7Zxp%%pp44$VG;;a7h3>pBG2aW&=)?ap07NmT0tEv$}*BUIhY)U`N)M810xM2``@1nln{Je ze<&J)GKv;cq{;RwqSum%*Z(aQ{$We+r5eml=J@3dy9)o?IN05N0Py+CyV|IxLi_fo zm-B_{+2E(Fh0^fP!0>ZylC;C(=hf@goy14^GqRPt^?-JgJfMpFBX4`to*wM7CDdWL zzPw#85sU5N9Y$IUk4SfQ$OOQSiE!++!RmelYJ-3a-%#ISKz})66_-lPmnxyu(1i-A 
z6X7fU-?i%OPy7j?TT#UwAFiKs*!>Y^z|Q<64UIK>3ijfH$7%;xJ0*trffphx50|s? zMy=f0S!OHc!|r13?7#I#ziDg0%hmoWScMsB!5c+{E^|TJ)||2@6I??cQ=_b}2_qPW zsOVG5nvTtvJ;p{@o?@VbTu)7EA06zTxsKZU2q|4zWGZUL#cly0>JN~cyO8n!`Is-# z7X-~?OKte7o%0922;s}8a;WOh09GorZm^2qv@aE1P!pM%_`P`tc*!|=G9&;AjY z19F;Z{|zsYY0B214d4j+XgwM?D3Tb3y`G@=$-}#cT zimg>PmZe&%aX!_a31v-Aiyo?CfvSfg!_?hfMWi3)F?fcI1dF8mR+_`hP4@ijdKLW< zg>^)fDw1@mu==P5{-p4-QM#$;^hn|Z2+Vf}A0H)I6sMCS%KR^v{G@2V^TYL`W|OSq z9Xi+?e6Vo5H}XoK=!$O2;i3_4nv z3WlQ!j(K@=oO}i>&HC{_z3DEp(C)z8FeTw2PiMTb^`!t>+dKw{WF|>kW19Dl3ityp z>AS5|2&HpysPueRLWad(N+RLv|C*T9p@w#EB$*)v)+E>9ERr zB1keL)ChpMPvF1!DLDpn^uaD^1w|c^Tl0UI3|DWy?$y*;Kb1^#$$FO81 zx;Tqm&E`*)+3!jA7$GrA{9nmMyII%rnt|%qRdbR0jfD7W;I*fd3xdHky z9RrjyE6ttjD7&f!W|*LZmKZeqya)qD>9-z^^Z`g(qPJO-O$q|qWnif02Z%J z^>yrtZkfgXUwfY)lbUX}7K#-{6lZ2KfSw9ENW!?lE`fxM$XsBp43wzo|BD9zU0om) zw#*;a^M5N%T`Y1?V;oF5l^S>RLj5;-^Ryu1WDKTEq+80C$)9?|fSk5Z@VC;X-HZXT z3mr6&zsb0C4d!va!qLj+pg7X(BZWjnsf7a7LD&8>mFKI-=Ffk~hcE1eiMw+Vxz%@k zTR-q|Ys(CFF!z-IP?GEi@jwA8M%L52dH_aUk~B6N8^7PSjJ`)1RmNu8I|F7M^D;-r z#w4azAAR#wc7WyMU1d&t@Lv?T0rw0&KdkOYLNpihwA4Es9lvD|9{LldEg3jUnCmo( zWF+0rb+(FfavEkXJx|_PU^fhh%f`Bx%3tQ-yrrN7cj)E2CEav6YbYQ)^!_pbp~}w( ze0CH}o%j@aDl?B%A~=%`J8TD{)uFnKpN2Yus~yWs5G(_;&mM5|4u30Vt>m!Sq8gYJ z#4EovgI!2^Wn|DnfrdmgM&x(_gTqRcv#h5#cMXOS98=EikA1w=!DnY<9J1P9>d0iCKkt~P)a9Zn<6KS0 z2iWFrOEfnS?Z6?_3jBvyM(K2nj4Jn-bx_pR#h2Ub-c~MqP*C19cLcB1+id7f2P@jm z?u(01?pj*%_jq*-OYW88a3^n;p?9dKn)CPBf}xV_1~d5KabnNfmUuc2UWD5R3W1rI zMGT8eO^eORZTZ5IQA4AWrR5%&ns#f>xUmRPuERbflZw2|Kj{C7h79$w{8&Q9-dL%y zxbRIIUGGvHF)@n{X1ke~TW+c3XXUgDwZV{4sGL`pUFgT;K?kPT6}Asx`hiI9ypQq% zHNp?AcsrkT6X=D_ESm}8m{a%DG%Bppi#q|ZNpCT9h7$Czkk1DH1A~1O%=rEUKfTKL z6|WIgl|^rD_~S-{_3&o)Ppx?8{}kYSCgXwW6c4^Oo}EbY6`TSr6nC^cStjf=5b4Z7 zlworCoS+sVEk#E5*Mj!TC>-Vw)pC`RrCbD3kRWY(n8n+4TtlluF&pzKmWbO=(wsi0 zoK~%Y66JLxzk?*#osmRRIKo(@jS0zSN!@7GjCfHMe?5$vVB>~K68VAYNLG>eu(t?{MuehDL_i2CKfR36H<8 zI&VUX|C6OBNOb`|BGT=o2dSQWA znz&{;E@3?VFzGFU(O{YD_>(J}4kOj@r-R0_+rj6+s3o~f`$?W~5$0>4xt*B~$AWA= 
zbKUkU&t-0rUUe69lezoC{L;^6V}ZeYTfTuvG6sH{W_r#Iu91~VHmdnn&M6`PG+D@k6xtCoCj-xuhu2iS z-2pQgi#E!~$|I$9bcBvd872WlvM|v3snXh5ERjx+qMQbx$4t7tWKrs2F_&$G>EE`# zQb>2T1FT+iblmTa@3{ROw1*Jwmt9j@7KDD<9?_ZxeaYLiETO3`dd` zc~i1h+rM$gpq=Md4}8`Ad~Gf$`UyQ4#7N4X+BIf_HEQYfAkY;@Cm@6=9P>{LrL|ov z-iH+iU>cqOn+8UZsuT9#UT5aOMJCvF{@@ZNKZ4(@4*-l^}(N=?($P7!7 zuLC#%N)dStCd6R1K&?omC=-4H8ufWc= z-;@2pi=x3xI?CU$3>Djc^wKK&*jKy>CI<)o+gj>~618s}vyu`W!}BCz1)niSm;XFD zJ^EwsQJw9A;U81Mk-GcuWCMFBNR(?Z%D+P)0PC8Phesu6f3OXPihmh+=fA(a>_Ta+ zYy7`kAFg5X`*?{xWupvRZ~XNBYZxtk3*UfV`UpfHJGKoO42=ORf$Ai1B3Z-uGFVjS zgldtrV%!g6f<^^!FfePDh`dJRtWjz23Cn2kNB8SGwm&0fr(-+-jf@sxizBKvGQ7?uzl=SF}|iZzKIC~6CjOO6%TSy`3q_##B5 z9m1V_l$VUYMG;&Z+!U1S`>d-mPNtlGXQ5~8K~X?-JJ8Eu#Q$fz z1k8v?-5U@;3R_>+Shoq_`mg;244C{5tLT8VmK)en@w&gvVNJ1Ezgm%8%oh-#l0`g! zxNR6Xd-b^js4R&OoJp7Z-_`)I($qOzW2zVlhZ*L%n0B=h*MjkFupbR~)bkz71}I+XfT48AI7kf^8wb>!90N0Na~>%5R2SgX`wRqSE`(?9+BtK2?d-eF5&1ta z3tyE1l0wWE1$C65l~;fSZ#h%JAbN9V-UOJ3@?R8@fa!p#j;bXi1z$jFezCF)NZLqq zi$5TAL#-wYiJz*bM)=;G%Y}jN^j`^aISqccz)BcRg8TcGSLc3fWM%K}iGH~n{fZtK z#-9OjZ-ruc%r1p-J!N$tH}|GzR}8fTDg1swYy&Q<)H=bEC`-leU*xe{c?OV?`g^Hc zw+2l+7;OyG3)s(*+^gU-h(QghRRgZK3_v(`yWFW&2mDbWs0M1a)s!{?;UJ**a#&Au zAT>k4Mb`eDy*ZGA=N*u+0_j(CX?aUZ!@DNYIP025?#hrOpt|6EdqnSKG(pb8_@h&5 zfu=&}tfV_XFkCHzTUFhJSkPh2Tn`VlWsJS)qZ0B=ET{guPM;Fj#@JJuk?pYKyd;*$ zRaWj3ICzvKVMl?!i^88rEvpCYZr8}{BJl@TR=uP5`2cPRY&LOSfgi|n^betm=~tI~ zt)XFIZFMjNhr_D8w6rwj{)$Ta)fpoC3^2`&mYd5drHLA9G@;UBWZ>7I*$^cckVPRr zZr>FY#u*QWEG0H+bJXkXOc_)vs=k}K?xsYJq6_YYv8Vy0WM#Faw{d$#T28i#dS9~*i-JZq~B@n$bTz}c`tea+mk#D7PO@~ zp~eCooftP2#e@Jw4l@uWYk&^i0eFJTeXc!T17=>i>UWd#m>^8DTwho-;2IVB^bik7 zI~wV9j~ae`i({RwMK8iFX^$cj+F$y3OZ8Gg*lBguYtNF(off;%ak`8(VF5|Ke?dR2 zcq|^gQ=XTh;b+G{#oSU({6S1A0MpTIZ`GF;_%pxOlvB}hSe3S zAuO?+7h})tHa$niD!vRHZhR$;c=39doiBvYi0ti!MpC4|_=Euf(+N~+))wl#2frg< zP-_yWshoqF6Hc_j4CSN9wum#DqZ!PIl0?MvuAFv02@*c6Sul}I<>hDGt7m;3(cC(UqC^d}lF*+5 zTW#sYZisB0~Mi*AV%beYb<9XfSb1ewuz|FIGPr#n&PsFH_ zzhgUHCJ{z3yD^&e0yPil=`cAf;TQkRJ=p6Wq~v!#h<7Mk0Y93JJu#V?ecfR`8lBM{ 
zNI}+W>a`-HakkI^d!VyE-0>OCUTBGd_I(?LcVN)&Zg!jt;XKfi4L-yH_-RS^(TQaG zYUqGp`!#0P!9e#X!QxsBP$j~FmsJuw_CsaBviK>MCL(3^YR3NAroqLR-Pg(NDTfnJ z)S8;b0PJCzZoK#Wl(kK)=ca9@|7Plm{==+LA14Y4Am+p2fLNWlzzv{Rx1d2n8d844 z)TkB*di9(@%flaO`#M3k?*JDbd6sNs`;V_LU(|MipsDHnrU~8iZ^B7fS2CXhrfj)` zm9B%fM~_X;57&SO0d1XHvOzP9xCr;FhKjSn>U))1E9HQ&9J>^wuy{!kENJWp%UF<9 z^3UHd3{v?X^%%3}X*7`WWN_#WY=oHuVs``&lqK0UXozvirzfR0?5LoV!ivvrs6>T) zT>t|FkIQFJFBE}Z6oc8iiktH4D8}jB#>{}8~Vrsl`mBO zBg)$TatCnQ#YO;^F|tR4Y#jdnlqlk+u3PnXT-N&&xXb+t-X9Fb!k^K?Q?G0oouZ^As zvr-1nnp+`)QirqTC%7RaqK_+T`W%Yhr*2s-x4R4LAne`&;(&-G=Nk{>MBQ!xSBemZ zZtffX85}gdj4u{+=W=D#$PqV9 z=>rVfFDOVJz0Iyme{b@IROUJ0Nd72qN*}aolnMw6v{0abDLC#_2vu>AUaea=SIN(n zpq%2SI>sg-*SZ*aX}+idW&6~zu^NO`C+PTaD4reR3{UW(E3tYc+)N(L&8k7BuxxC> z{b{ooV-7j}=LD9-Abk*Yfz(M(_mPwd4@Xo0xXX|}@Cav}(*jG%qzX%Fhqn5o{O5*U zzT+>G1Si2da(=yDr(56K?mHiFQ>z%Znk9R_+j=vt=A~&V!&}v|H!Lyp&}b7mu0!9-!(xeKm5%2eb$aM%pPTtPc>7?p`DhWz zVXk#FTXS9lyUhe3h`#pOmC&v=0bWbrR7cF7!&JoVzRSE$HvGo(Di8MW@ zRe}yHirX#Kmcq06i3Gz>cBQFR`1 z5Hu{91oEc6b7meLwE0FC=+FcnSbaL&r&oh0^53J29H#)*!IjHpTi5GsIV9h>wFRg- zSL?p&WVQrnNrcR$^@eqfRmle&i{*55EF5NC)V&KcqPO$X^OFg;Z&0Ut>6g+P0Qu-R{Ertw&!saeXJ$S`qJAms--KT)byWU`&F ziz-TBH@oTn>~@_%3bQ~|_3j&fnJXE!`30oHKn$()I{i0I%veV1DX$h6lfCYG7-j=@gasj@o)~fq2*dt8PABRC}T*@S*db+>ET=fmx$mJpcC@wxWXZ`~d-n6)NSGTd01W zK@#m7vM}T{>7jI(5sMnx#f!ptI1Nqyc`XB>Laho`Z2ZW|D-febTw#hgg%FD8$kV_Ck4!U2A#`3CA9w8gvk`R-tFF91xtQ_I9dNHEF^A1RN zzcZ~ty~As4;314i#|qCswy#0h&p)xW0gsf#yJZ}3i+CK11HOyV!WYcgG{U-Hx7dN& zg*nu*PFHnVbv1Cy*G{VY%_hwCsMt+-g|=zFmc=J|5`{AQjJ@|-R!$@8amWEf*Jc5N zV6Ispb52WAH|bJ2MOrg!Kb;IFVmxI2QsB4BhRGs9EO}{UabS=qsUR+RbEer&J?aJ#B=VmfApsen}_o=yDClm8#36soZlMcG1F*` zz3vO&@W+UFdYQH(rOs7%I&?`cMj2$XBcPMbZzClaa9<80jbS$yT^&w)%-(v%MZs&G zAJ_-bQ{*FESNk1!Op!%^t*4Ldt*L8AvY2T*YO!&v(PYjU&}VAHTtS5kg$UDT@n>Ys zO$0Z#_QS`%(nti{?3vA1WGE&l*RS4Q@A3(La5%5{O~b?M^7KpDk4}6>fw@<_&X~%q zv_Js>36{^`|30_ryX?)qr#1H#&tfZG88kMMb;Bzl zKz~?-SK3YL>*d$uDH63!U755bbzc`~RK<#bg^V7b3zF>WkzEauBx1U6;B(keL_DHW 
ztAeQ2mEBQkq>Akjzz*~>+6K*g?7ep=CUf|EL^5F)7Y(}eN+#J}fN((qaMBGByR2wp zL$%w184J*?Cm*kOf=}ICUrzEvfJXtfWcCHS!pE<+0kRL9J)?DE&HVFs8RA#gK%5HW z)4^57vuekeawE-u)^#s>#H|f(YY7=i*%%h?Ie{20*wbEYYU8|lwl793NBTYKGd%fK z`-7C-j;#)(j?l@DczyxbnIrMyut<>)75A-b**aHjj_f8e^?we3XuLZ9s#osq(B~14%QDAft{01gOYJd=vEGo+mnbp`w19Et9%WM zlXys`o5=aCOiQI`Mhf^GT?X5#zK-LUKS5DF@$Q>AKkC`w$hC?iag|vNvbYop*OAkb z9xR_6bvKf;79sJ7C2G@WGinZ%vcNMP2)woP2yQ1j^^WX)5QQ@YnI2s zn%9{OAHe7Z|9UiK{%#A{+(F;nSM8@@I4uo$qf7B}O0{;JVsBV}?C1xL%bm`2Dcplb z_+{T#^!GrWDmjZw%+i6q7#a9dwW9VosQ|aAqQTA2F{z zCq>wuKI8>mbOz%w&v5*LQPwn3M>#&v_Oy|Z-~87#_}D2BQ9>8e7-dyvL$0!iodSw6 z5efw{<~;h?%`9asF=`~Ep6QCwBygepENK#(h*#-bFx?hR5JNUp_3g%1_eEY{@$baK zGq1k&jmF|qLTEVrPIy_`wdSfqdhs%A-j=zn)r(;waG@U9bW`&NmqA#ZG z1FQEo4EIPdeX}aplDC;WlT^;U`v*C{7u#{eOJ~wB-3nifACzYAmNvbA?|A0_wCZH` z4B3WEH$N$iE{$rldRVkrshd7@`6M2U*+$Ygn^@7?z!WykFwo~~rj}<@C*siVG!ZCx z$-$ySM+x^fks8-t)~!_s+6LK9|N0*92^lr{RArLfZPa$A-rW}p1TlWreL2=j@4>be zmUEe4h8T^U_Oj;wL9HKh5;A3ed9b6HY|!BH9R|t>Xq4|1LM*lYsch*5m11gL5g9z;VD%{VBb_H?C;loRkK=II(eSe9r&Lniy!JK&k(ByX6NhAH_63#anb z=o2q&hDV>I&okBzI*K2c(Q?YQSPV-57SxDkDgnxM_t7FX;gIQ^m#=;d2~XV$c%5Ea zkd@n_{-81n$hxIgKGdR>b%mr4l!Wnhvw6={?@h339t^kM{+#I21Qa~D`IbU-ElNZ? 
z^UDzHKK`ACy}PFu_BSAo;kb;%;Y1x9l%Jwsn z$bI|rrTgd>9!rEtFsS)@cn>TJQ1%Qx)&w&YplhYut4YJ-f46G)ynpea>|*2{1F*Dq zbWqUK=$pfQE|o^Mb&Gl&+weIMp)9b^=oqwrTl)pECOJc|Y}3f=vmBV$KY`cB-=>+3 ziN%!x?v3PDhsE+*Y4QK8t~%pqGfuofYY;Cso?p&bPDP(w7M4lPq2icIm#L9&L0gqL zhZ4uD+K#kzAUdPyH;I|m{4!a6oqQ&~XklIJoajK^n}PxViQ_iI<|xXC^>=Sx$mz&5 ze?N(b%{tdht9S*ZxRvEeOi~N>SUZr=y?Kf}Gk~Csv?9e&j7Aw5WPj__JL!g~7?B^q z3;W3$O=oYzRgr=-^LB>35%Q?8{XHtQJl`Zue#mp!gsAAJ$Hr}^VD~h+(l!a~Dob}t zpDj=(FoujS011mu+BG+G``LC=jR9QV-w;Rc-Jk&CU z+O3OB&_OnWFQP}wt}RZVbGkDzxEQlgMB7BP?_kpP;0bIaQt9J=cR`+;OQk#Uq4v1| zsal_*D#p{QWs|Z?$RCiYzPM{vLdoF|Pr0hsmROnA z7~X{~TOM;$;Sl9bQ1%mimhw(r7<;HeRYJ#@3KlT<^+LT;D&0WUyrb)wH+j&qldgizXp@u&3~wGTrVtf9GvCA_pDi}+^ETZ*-Jc_ z=QLf8YV}@ss>);-<#qC}rp|3A$<}GzdRGRDNULmVI6PY|RIT$!ND@)S$!<}Zj2@0I z6;~cCzQkB93M}N`=0adRcGq{~xm(GEAjAEe&OaaP(t9}^Wt%yDQo$DIZENpL#C#j4 zauB=9I1y6|?SjB#b-fwIyw*dnJK*zwN3MgClR&Gs@%|jF=46AprHvh%8J#rBB>S}( zF0PhrKmS3@^{PNtj?bz?oJHpq8)!=hduiy31aP1^Zoeo4ny1>8FPBdUO0(McG4C9g zpL91SMu^r`A+ecYC_&H;QUc41$pb`bIT)cx5B{%UegSWZciyP9IsXs`N3cUtksQ$=T;VME#wp2A6k_dUs+72uG(?h*&k!Di5sYtDEBoP-pPUFGzq4B@OFXEO%9L;)la8ZF8^~<~&L9P-=b}LB zyNM#PS|&RP#Cfd475H5)m_Ogn z>(r{_%WO}LJN_6vaNo4a~4oo zCj#F;pmu4_$!3h!F=-&5qYCAXZPKQ<>!Jh&qdf4SEBXJ;h}x9Yu)b=z* znq7xO0aAS_q%_Ay_UTR#RK#J*c?g@oD$VvlBeH9)jmTEZj;GQkl$ydm@kyqj7QnDi zeggDXEbj6_{Hv%mfypm(iw37WRAWDa>Aux24!s#ba4j%vLS!}ksRhdQW4eH+UsqkY zxj(rRLPB)9e@3ge9~hET(%g2QVK+)TvpU$lkrI^3esw+;JeApAK9WtTMV+DB;K(UT zen^X1)fZAcSb0V&lTk0NQMG?`B02`;Wn@q7{32yFVkaY}MXg7Co_R?7+!Jw@T$1sz zTR(d^&pV29tH)%jz_9RZlY$;Xa9hnUg|}FsN!IW=_w&NN1QH+6%67T+P3n2yK-A~} z$*y7O+M`eATL)kHUC)!vixxX<&cM?tJq63(@@basz?$`dJEQ2Y#pNejcWE%P@%usG zVldrNlxMJ?&(A4zBI&u*`_+hE_pQO&N^(H3-wJ$3&#)^j+~jxi>PB#+g>aY__@4^o zY+ZTfP}}3g1uDP~xy5jM@Ytu-@}myOWkr;v(deL?6mx1Vp8UU(u<7%HzNMPaaXJ`1 zo&Z(^ZT44XIJK`dJZZ7nt2w&L{$hPvY@;Tha*_N9_h#zaR}pC~+^w;)%z3?&2>9%y zpZW&2y58rTL|Ph2>F!3l z8>G9tL%Ks+BsUFGo0RUewm$EFoO8w*=hO4)aj@=t?R%|x&FlKjv$r+5*Ph3Ggm}aa zx}*Xr*F--sm?IMc+6`R849|fp@o2Szg344k4x6EEtwd;I{g*q>zG6JbHWp0!R{eTh 
zLJ7PmaRyRRw#DnDPu|OHODA(@{p*)-Qi{(RsTQ`nHQaF}Fx3Em%2;j?BwmP`j_1q9 z!9epT-SR2)HJ}_@|K9yFyu^z7JGHqtPbiD}aQ-}X%U_7Q&5#v;#tUa(d(gW8`sGL? z(|GjO367+Jxp(~ggmb}znN+TUMRc!MR@aF&2$8r^*pQN-`wZHce>>EUpezYkX{|ae zl84qf3^+pSq@~@O4kwMBC5yaf}RoS362u8 z|7(R}j6X)9vCR&({BXvcM@m*gDguZ$gEjpuCf)FH@g>jYtVE*drO58^n@N)kT``K4{m5S;Iaj|!-!P7OY%+_QKuHfODNKS3_MzaPSh~in^~fB~te<@&a>K%Qd}RYXf{dE^ zLYqB*_}h<6K1YijQ^{KUeYFkcb?jDk7lgW<8MevUQ*M}Mpb|k0TlUpo`0pIcr&Ap9 z1(cHekkL7=w7+`bj=0B9ZMf@kgsITn2ZoS0H9BGnsu&UztkBW<%ZQ&E+6HhK5v6O0hj)*>%V=2ZSEZdEE?dw zP$~d*?A<`>-2Wa|RyU3W_;BYg{c<%GY80%$Uw!DX`V5TchnKI1uZ*5j8P@hdeYHEE zq}eU=ykL@>9c3cxGhrsXCLtJo6>YmxAt{x6d%MF%e=CNbYo!wu+jn03`aSTOT#p7T zuWf`8tb@inikZT<1NAU%e0#CMC9&wMetU_TVQX`bk5_;GxSyUeaTn5%AD%V3vSqZ2 zIMT{&$Mw}|U=yXelJ^4#f^eP9)VQ0P6|I7%uDqZdDYUu}73G4-yoreo71%x2a-rGf zh3r+|ig70PlXzb?ggC1*$QUoB2LhsxcHkS4A3A?F2a8)76xOhE<5XcsOqezX>{?Bu z=tCp7MtaxtI)>9NC+S7_BG>+AxM5~A&OQt4qT#P5`B`WCoV+eSE4v+*ecn36NlqED zo?!}k7abSAd2Wj8I=fwuA8MH_XdwWkz#xz>x+C$UW#GN>uiqb)w(%npngUCbaq=Bp zNVsjp2#PAWYl%k;J$*xbwYyL~q;lkxC=DLCo9|{v;K32l3O`CW4kdYOckn5E(hy46 zr&QCQO(8OZ%+9LY4bN^(lk?(lM3iL4W_yA1wr-udHW?y?Mrc|e?iXgU@`1)*OV`fe z&Ov;-=yf$>De>3hoZ_|4LPVzhfVo(gCa*g@`uX&|6B_j~vofC7I#OvGr-$fX1Zf|s z=s;Y|d$sJpokHVnn<8S2go!6P5s}29Po8kKM(+ZA%B7sTQUp0Ka9@~A&i}PHg3Kt- z%6)w18&Xa$D!TdA^}FtM;tU^s7{#&Sp)@cyvch(brdM2ws65Id`dTZ~0wmG9f(YBmMBjzq0x+{!X zn-NgX7!3%Na8=T?8OY(KGG{oA`_2ov?2$w#9iHivJrnzZ!=E8krfrtB{Kr!&7c|9N zT(Nj%4mzw-JiBMu+LT*mk}RkxPuOF+++3CR=-qiwG+OxYE#j5G6_}CJ%3#+tu&LCj zXD-a`$j~aZB_GG{_RTQ?=hw2R(K=snD#)x;2PDw`J)%N9akfdltvNWckerPBqhx%N z5SVPUkL5GruMHPf#Fyv zQyZE5B`>h^`&Edrc;SHELupF+cgXOUrm|cX*$b;oI57?2oq@T2E{b4rg?B(D0J$c~ ze0c5xb<{^ZD29Ds-hl?-Fg(6LFlWDp3%$YE7~v5l_=OaeFmAG;u~^{B@83*)ARO>5 zz<0jOT26~h8wKk=W0O&n^qVo9zp%l*yUwL`rk&A$t1Qtk^+TYx=3_>|Vm3>nmq{Kq zC7wx%YoFoV-duHUBnEm5L+>@7qS*HOOn7&)$555Q4WE6+3=u_Sd4y4uKm@5p^~-2I zu<`ej+Qu^1eIitwtctK1h*h#w^>mOwX@gusmDW$E7nw{|z(C{9!@OJCB$p3M%vEA{ zYhVFuuhbiH-s{n;UvGh}E;s^TStJmS!ULY~r= zi|4w0yF_6?)vQZ?87MCPMH>08q}sGa-~s#)Y3K7x#xX0NCy6*^24ouG0Rl>p)Em0* 
zzm?41oh{f2-QPOe#8CQbr5X%k!!mfWFJc8gI7Z`y#D1$u&az*|ercOcg*Z9^f1e@K z9EeL55({E^#i6r)PvM^tl5uBY7AYXmDn}tw`$yt-=os#?S+=+V|}y7V}?YlT!OGRHlm)m`~4`wLMi8%TWkQ&zk?-xDg={6A3| zOPwFn4KfTG_BJ&)&kXuuG`6!$i@AjZXrmId51Sj=!+yeNZCUCcN`54<G~fw;Q-u?Q zfPh|4%i@XBODbMx5LT&#`%lir1)k*Zn>};(Au2@*9hJI||9Wqe@A1?}Es8`#9OLTL z;{W6+G)Gv90)0TPMVmF(Dh=O6)lTn)j4}AIg60T|(F`d#gU!{RhOVkvb^XiKkURf- zw=f<78m`Bv*r7V+xyBp@UZ9F-=K(^zgyd1v?pr#Z-w~gOrDsrehvpB^X%#@iiv^{2 z$d}LJKsbWK>A78sy+ndsbPGQH+cY={yIOAJbK14 zN!2^Zje%?@@R~V{zrJd#KGmX{@sek50NW%-8)AuEI=yfPQ5&yzXM=8}X=4@IvqJQbEq z9d|BEH527iou` zJe+-&1IhbKpVjZM!(B`Qmk%j!k_gSlHUVuy%mV3VVPQvNpmwE@y$Iix`7j*)gm+~} zKsKcu8oq-RNAoBXVL-QNdr050iwN6h|i*0(UsvS<$7F zU~#j{vnPzKZ6%~!u{k$jdGI*hzOYYvNEE#@COvqxQp8;=9wO#%*`0W>-!OrOz&+jj zGBW?e`&;zL3-<+kEh}l%bRI=$q0a__8$RsTO!(c9t1V?3eoDOE7uLkQT(S1MKoeI@ zVA(K_#^E4urn3>Y<0}8L`mqD+<6CcR9nNlIGS*ca(E7a>?Gl~_{5TomII7;)GH<9t zDP?SHn{h7eTSzeMT^?OHI7)e}-$nWKAdgIXxUa1tGxAsBk`Xv4QT3XWv=NGr6iDde z#_RW3D|x}<)_yvA&MbfRXKVycu|L4OT!0oWKVtp5jU#M_>4i*Sk_ly&Vl{dOf0!W0 zLv*Fk1p4!|k8F0f=npgNUdCLllue9#J{b@9L=JRQop$_P*4mKfsGY5_bN#zf78IQO zGaBjI36tM#sgXm2iRZDvA<^auR5r8*ehP%zz8?`V3qDQ;%51HCAt4oQj?EaX#&MxKd9Iz{?#y`2 zio2r-KX&T*)Fu1RbI|B99T%VUpcApX^K9~6^QO|9j!-K`Yz)I9YL!Kh?tq2Lpg-KGF$-JFuKJcpi(-`o(0P*~HXjttwq6y2)+@%CM$U@C=c@GB{r9Jy2|^ zWzjwLxyi2CF@MdVvPWA4+JeqsW?#|ISJ0!_yAe9ojRZ?Gp&=IguL?~Z=h7d)FkLfW zP=VW$1plUo46J?0xYRKF??L>R&T9@%RV8Bum)=G-Qs$qbe|m;(gkn1B?5<^&a=W$9^0LwQ>F@ z%Vw_y!g>5&=B@jOCQu>5ipEy{@C!8~8$IQY11i!t4MSmkT7T%* z?NfbsLkY8Xlqf97O=S=E-7BOhZd?w=X6?;m*Y{$`9dlY%2c9x6)W|tdlkNo}HxK90 z8*Dw@lWMp>y}mTt(w?<5#i%ER_|Uo~=yU}jZ?{yk!33U{&nhyhzBq|5mH9~liP`E9 z@6~LdS)WO0MU9on2}-QKl%BlG^fO-%Vm z9*4+FD!Ez8-ojb=Le4yL6nkc!CJXH_p9HGk`^Q%LgZH!%t)Qxh2fSri*k3~0e#EEU zYN>U^Wa~}2UCI-{EfkKRO`v_trSlxlKZ6~TAMEn-rHuKh1GKJ`ApP)x4_M=~N#$zv zFM8j+HxMe9;XXcMzKW-JGh5IwK2|}Mp5=OcGq&fncZW~v9O<*)G4~{hpEQ(6!zlBj zdZ~DG8>nDe3oEwtKP!;P5AIp29S2OC#xfHHFT5j~?ujhl?iqsv^e7=`%mG$LSNPw#Tvlmd;@W^%^gD)cGs> zA2&h4&x!uE-~MGttWK_id@AlfS%9qPoy1h)ZR?HI?{S5H!uN%SwUTuRF22$UF$JHS 
zT=;^Z*M)ohb+3i!>oCzY28THC4vpl7DZJMV5Sni>mZeR;4nkk(Tm+}VXdi8t>!`Fc zv#eit1Y)C)d1pW#!hs3rI=!jOtnKhrAWW50IPWsa0z!cbHM+HAWcTEJCJ!At_9KnJ zRhy#D{(rA&MVZCfWC{F?zoFn-nPS#*fkG0v(6bcWiO0`I8n6Z!@A;OdR*O%5K~%%VCkDymz@Bk5cbKsneGr*Qt|%wHjM&l!k`c2^ zb>zQdFZ}1~%ic0op5ATWdJ+30j{&!CYx-RMv!Mk6wu8tf3icP%9o0{UNL4z$2t;f@!0lTF(@(VXavB+Sp{vB^X{N;D&ax8| zA1_5@4nZFMASBeoR-Ti_Wksr39{crCHWveDE~)4B4c#ZjcLjI($ISm*!)nU0OY>II z9gDC5*khlDFvn{3Gop^z*_P~Z=~6d64C*ajh4+^tAQhZFxS-EFkGVEl)ybzS5vw6b zqf%9YR&+$kieM)`{r&mNO)L~Fil-cBBtWIbj3Hd+`P;Ns##D&LNv;vf`p%;#;AmGW z9UAA?#5BFxH!-BMR2Jvdx5B1 zEvu`zH)$oybaX83hrR4vM~juZnZE*gHPBOGVKNClcj(_RvFf`_tF^1|nW8eOA9;<+ zH^opwH72S2(KUr5iX7p!&P8l&y|!+S`?fj%LU*`yC!7?p82jb41uY}{p!#2WddNs3 zEITl~?lRw-6iBf0PG=^TtXnx^G}gew3yp{0SmP_Gf8BmaAxjzbC<$NNSc3p*NAA;QSc^!qyNN;~YSMI#*9@Wi`$gen3C2W|H z=fdH;?38a}8Fac7wBR^p{^$&*k~wEsyj9mu_EwKBub^ zWnhuEp(s|zR66#09+U16f~oZ@V7B0}a|h+iR|>D%P+U&9wrs(PEY~#u74v+*Tp@cS=ObqWQ<-JWNdlEi$Nt7&*4DGY%&_Mq8vU?3+vrlRM<`_cjm>O*%yWQPKz{nUGjhj<*aZL9xwY74Fxk zLiOfCbJdP?Gqf>X9uu8})rLqR7>wjz&E(SJGWOv^Sv7$y4kt>rNi$EUJj*tlS5~if zIJZVO*Q__U<3z%v?!ftSyjXguB&*1{iH_%v=iWpHy;n!o-?{)HL~0w3;wR&klt=eB zj_dZ3&tI;!+LdP6&k6djF1cIZb9qkZc(kBEO|e5UYsB@jNBnfo|KTe#^iNJE39 z84l%LWEM!dZ<9|o)?Jf~+IEZY)890zYqa_9RR`;HYo^&|tqp__Ht41{1&%zOJ|!rD zmx_qsKC-VUm^g|I_^Ym2c>S+;AOrvR2XoqYspdJ;NEU)2yA}l9LCOGC z=*iul{ex)^7sGr5J+Z7+7;vc;yUiv%=HSs}d9mj6*#eElPGX<{k~;3;*(tlWM8j3Vl6gH^_=nMDt<@>kmLD4aPBeFKHpfpSP7 z@z=d;hg5F6r@W4P9RT}S1+uxo0|YaVw>>l81oQ(+!pcTB;Bgywey{&&2x|*>ymP|J z_M<{tHKB~c{h0aXB_4_LhNH{Cq&-cZ=_j2Ds{W;~?19rOL(AKW(!MS)$@#Psb!qKTPyq*;4xQ_d9PtoB8ZZVy=vo2?jy%E21EoSpr(%YVJXNWS>TZ?goOy<1j?NL#-v#{HU0d$K!@rNdF5&QH%9lT!|ZXZpr$ zNDLvrD=LccL-{H1id9kvDnkR~M< z77&b!=8xDr{QnE+1kW6GPPV|KxJR-+V$PEL$^1K@UD3wGv{#|ky@aq_N|HD(@BFbY z*pZO(M`_5D8uxb5A10AXyM@p>1b$hBJ(X`P5iT z?-l4trQ7H}lYq_Rd3tW*Iv;o^m&$iNlvBvvmcQ!|vDOoUq_elGikIe6e^Dcqn@INI zIp`$hIos$AU^ag(07^ckn6CSfB08A8(+6gLj92Q4wA`IT=r!sYLvR^JzNI# z{#PFY8DFxO>|$I^G%Td4rqS`sd~u9@xk@9)cQ6+I1A 
zQ@_7i*Nxcj`E}-fE50m!V~r4kU%|-~9MHVsiFnTUV&eCO#N+yQ8ZFhW2pzB9Sl*uz zSjKq~alSF^II}mg;V(nu{5iXyKY6zCz+6zX+Ww3qb?{#Zw^z%F$IBkh_nDj3Zhc|O zF8987IYy6}?9gzmwKaP=RFfpY{Yy_~=y7h0B8Lq8<`% z^ZT)bSg|kr>jjniaZ58!h~C@1QmH5CKDij&A#Wc+!G~|VdfL}LpFU5IBplKO+o~`$ z?fQ2TUz_zl-Hn0hpr9b3JgN9K8h z!J@{0fwGjC+k}(I1vz{5h3f5M(}uShYq^^}$=g#W)TN#~$f-@dB;^KgUB;Vpon7h# zadNm!cuE-ZEt85}9sY3mWG%N)0LWZQq($GKCqq@%hh2?9L3F4}T+Te&X~Kq|050TP zC7Q80Tk?H7@tB|QI2msM3659q)SEmTZV`3Ofv7$f5&PVR)nbvtZKx!r?H)Rbk5vYyWzJpUV)ig zwwfd*a*OdK%MG5dZmy=az9-;YJ9WD&+X=moaM7C%P2hMBpW%Ujc@52c4-ax;Xkk_^ zKfEW<-}A$0qvp4l#{jsh&gWh209Cd>SRh(=Syij><{l&An~=JIdA3-mWY z<#ogv)5-c?bqIzAu-Bd^8!|%g9L(3AM_pCJlzrD#7#xwx6|y1g1$k6u_x`FyFpX`j z$(I$n&u{3jAm7AW%c94FXC^Q_Ix)F>gv4v4@Aov54bL7j>7Z%%{T|02 z+nyW;i40-adN!pJA?=*it=E|chqOme`p=nlH4<06dR+rblFiK;=E%@oRT{as_b6KJ zBLLJNo^cHB8E5nym)uOkEv)4kUxfxiLG#Cc#P{MwOJKX>eI7h~jMHHfa z7T$cJy(;p^BHZf1lout}+!?w&jt03N8_xIdlY()Gy}Idj2rX=ivv7dXTL<>zo+;N- z1iX&8R=0~{qyg}QrI5|6#B2kwgUgLrSr(AG0@IwdJR<&w6@wv3S1|5*oN?($&2oRI zep9V=!jd7M3x5Tjo^8g?m(Z>C!*C$uUggs#0(ewp2lpk4-a%Med^DE_aKXH7U0Wpg zrA+NjiJ;o5xmJ#rqu-PO#(AE-aen1P>gzx7^@8?v%3Jli@*p73XWSiwXg+Hli6&k% zTghL5hI4Nu61rtkc-@57Yc`8{?u|wp){eJSwVFZzkTkDj^s0o8RdjAF?on{HsTp|G zcPvIiIWPciBKJq(OPHp{K?cFSl-MIb`jmNN3K(#r(dl9^4qp%XAL?qGTdqoH*6}v} zqtnS_K=irG&U3$7Io||47YW$YB2Rd&r7#g?GMS(CKAiX%{s#bR{JM2o_8_P+8tVhI z%xunfKmB)t|3gZ*tyJhIH5p!8c96ow9Ve{ zw8U6zwK>cJD$uMA6%sWM9dM+kc2wUDjhpDweVYB>lbZJ-yj5W(b+gUwT*jDVDUU)E zNw3%KVC!2hFekigM2kh8WjR77X?xa?CCyH9P-I1V!s$44RJY2=QMu}WVX>_=4D7Pz zT(WH!Sjly>#GMC)MkOD`4%L&IVXp*1|lSaTV^!0NO1mGV{SalzSSZ> zviDo+FU@YWi-!zLvV^jb^o*YEv5uZK!quHx{G@h+>^Xb%Kvi32A`b${ioz4Qu;^7@ zuO7?2`BLpv(`ucy-c1=~ug!?$2|HED)8T&reDU#Siq#q{83!gN#j||3Sr7P&O#2`7 zcwG^OlRiT5f<;&6sH=0jqEHemb$zhu7?mP@eOVN8S3H z8*e=S;7DffaknA@**X-5!4N#W6VB?akr@f8D0n~}N$ULaD>K{9Y4p?D#?QJB5z{s3 zS~orEYqM!}P#Xl(UH@Q8g}(}KiNN|cDcs5CT=Upy{1D(tA)xdQdR#o7DuE!rMEgW9 zYVb(n-4W5cMmj9=i%&GF21p|yihws-qFz`0rsg-GIM^c4$)t9`;L{g9xYV^1IpQV6 
zi2zluhu4gPhy5bH&7{eBp9(#V*RP7CFMA_1CLiO@PrB8gOclTE!8H7%I45`gZaQ1S zsLpzH_%@I;Lr$6XnnwJ>^a8T`iq~vPbg^vjmZ3Rvk;27@Z))u9>w)Ka>d`hRF@@zwyyq*Jz@g$1qVv3VL%es zKvK$VR6#o0p-Qc|e0d9L1j&i7lfZ!-R%dq=$n|4%1^R1n9K27Jhu@Y6;m|e{!w$*4 zeCfxBl<{zONdd6Cg-y5u*5_Yyrm!)V^jjLVB44TEA6;x_czxxt*kjNnjf?Z~lgAi{5SQ9! zX@9bmaoINCmK48_AF$Ov987fxkt@tci~Lo+x+-h#JdR0w;bj>VTd`-NDV54%s<%l) z^6fxFB=_xG(Gd*}X(pZSL6vTsFIi`BzX8>H_daNj%=hUe@$*vIMA~)Bo+NzCdH3)_ z=a5|Mbmk%hP3Fg)ih6R$Vd{cX!Lbmk^kC5e)VP{HDz8UpSaExAbc0I8~sLEKA>YW5bu9P?S9xgK${o|M)%WscAx;7^|(hHWhytL^B0`!Tqdtxn*T7OUnr;?d-C<#q74PbqP&Z0AR5Pl`I*aO z7(*26V+gm^gxJAD`h!Yq+=|EV4;Ksbr3XD<5iZ%BhEH8JX&U6df^e?V>*Y)^jE<5Mj5?h6!Vjb=`Rtzy~|To{dy5 z_a}vIe2x{@O;;k~WsrqHUV|Ae#QI^?{+lkmzgjvddw5&BcX{! zoj}yp3z103QORl>?d-!fxl*dapnurfvLVffKiUdeO5qN6>@nVmaUsji+`ZV4aC`NB zI1H{Alfz%2+DVKLnqy6>7Ja##qebVW+wteIR4(7-4H48#p|;8*4IzM&$~WIXpaL!s z1nUg@K|i61K&Z9ojKy@39)vh%CZrlND?}bJOE9YnuKE0f5|KX9j~UArgQ*(N;r)*HC0v}bLBV7G&cJ{s`K^f_j~w;Zpip?Q z#LGL%f|Fg@3N6j$42gjDn;foSb!`7nfy#>m8 zN3G0EvSNtwWiz>WD@Qk-eMcScu;`mwxz(KxUVo3rNva2vMlYacCwrmtv+hRSz8Q0J zWie>2B!2q+^F0sb6RY5QMU%I*;pz-Zw(03y<4NJlI$d+C{85+5QO*7gXxCXQCjFjk z5>+1H*s8j0PoGGN*t{{A!hCz3gSA9@W4Le;RHR7S9tii9%%G>aLb&IT=c&T-lZD@QyCclY2k2^SqIkjt2l9k!5Q9&PVNI6esITr`B=*PQV&N%z3m5zh&)F#v zI-Xsc*F8TQaeru(Dn}oEax9?Q=K0Kn z*n_Or)+aI-bPB62*p-DY97L&V$VS`)=jbP9h<;>{LNCF}J^kiFa=0`LlF z$(K0v_Mc-#Hbhfoj`h-{3lP$jFqGVzKiIqhawH57_Nia#(_;S5=xi?55{n7A5ADb@ z#X%ucaI~H0`LU(T8+JF!09neo&KV$a)fLdd6R1BpoqI#RlZ=Z_fBn0bS-!=~2Cy&8 z=t`s;+VQ%gAg9^qJyWL_KJwy@pWm&Y+C9k-x!3+va;TJ@+l4SUD?%=Y;ze?4OFh*| z4yL;zRDY>K#hTe7f+38Lvm4~zNmvjr$He*K(}PLA-Tf3^rLJqPXHzQiRpPIKYdI7a z=>J5MCmu;|DlpmcceaYY=;pPcH{{URoHeEVIRP;$`3`KkQA30o?@sLRy*|i{FC};LGw_S&~a`5!2r_yAz z1hbuApViq2@Q0nurJo9Cz&Qci=~+P9MZ-y}Zqczi%g9h^I2`K|TVH*WWjrfY&Kqro z3i>@F4|@V{>GI8z4q4X!wmU5YV%nOR&EKI2qWG^h3&R!>ptuzPI#%K^2EZ{PYup*^ zFaKhJfu6c7{D;iSeufa-bD`dbX43xEK>^v9|MEG4u~D>+ zqW)_eyWu5?HD>5P&|Wz~RcoU%ovrb4Zp|1^raMENJg!{WR%3D3{5H2oUbPh12=K-% z>9$x;UAA%@Y++SV%jxL^Lk`eBgwOOscyP6cDdp&;Z5@U|?lZq%N6!`B(Fto{!?Z{_ 
zQvy)yz^9ApuU{t;_`(&xr!xJbq7!g*y8=|S%75N>Oh77O^9V{&YpUO|7H{U2bCokW z)tZ>uI*_;vpH(%*ePa1j+c`}6z0&C=)*eEze_zA?@>yf*{o!DzEeW^eFfoo5V-Gya z;qNo?>74#*@?bZwKi5a|@E>x)Z^U5~EBeXUsmoJl3@;+9a2Hf&G?&2wn{b8VU?JFv za{v5qruQ=F-y{!VfIOqbCPMyIs7tY$__x#~pqFW4-YHN0)}qxa>z~c#k~#FmG}U*G z*YJifQ=^6R`o)ce(qshh$m%U`$`lsfIZt21Ia~dhP*T>Hwf*HZ7eFhkV;f^eHkIa6 z4+7i~oP1OP5TFK;hXfOdhM{^cyoiu3NnbroVcsi*XF-(N0~bz!Q!;IPP%`*G&+)Gl zBBpCRI(D_&`{k#vd@g_~`wtMCAn^lC5|MXPu)y6`egv{(?BID4HSd)2Rlnh3-rG~* zOtVMXCQY>9mH&u}lmBsoA(y*A@jDiy{iQFF{CCIo|9hmO(eZ4ds_BSlthCaxP#I9N4RYdkYJ9?YTqF${gf~X z$P47A!QXW#K2g#$Z^#ae-2#dI?9NB$xa_W;MQC$H45rZKoJRcwHVCmkd#U_SZ(3t6 zQpZyHQO%d0F>0Acz8!~Q@$iey7m#XA5#nAUE_gkZ!4RBa{>~f@F6wS1GU7&1$E#PVsoWg zN&PZRKz;?-5PIqQC(?w#RO*Hn3e&I(Z}-JN2qUX?!=!L|n#|q03;18V37_NA)zps& zpR^~h4}J!}rWhJ_f=&M^rG_t`BYHGhJ_f`7-Md?+WVl)`@ zu8ookY@~~J0wR<@K|#OdCe$19*FvgRXuBDv<$QW*ML-2+RZxRzZLrx)uy6Z6(=LiH zECCHDwlh(!5xLeN8SiB#RI7hbA#!l&$;8IvJ^wg&{b0Anj%a2AJty69P9_s8{?$yy zN{>Iz<$lKU(e29Cy2WxDpwpY7m%Y0yI+F*Ua)ZODyYrK0VmU%4rd(nC&jM|_l~S9X ziD%p;h@RnV+%NhJAVe37b2&EYOm7C8dTVmhd3U`-z7qC^UBlI5O({V*{lw#1uY4Ib z_V7Ku#{cn0)=LKCR!l_4Qi|+lZ=}0@l}`HChuy%I{J#;?X}}0E!1Ryl()o|7`zaHA z8)jIi^PcR>q6InjcsO*h-yh7pIoWt*3c$|rUD-h>s2dOb56qs6r)FS4A!4EX-{Qr3 z7%_4y>DMMEJ#8~y|2D0N_0g=1X3O3Km+XD@UdNkXiK-&OIOordz&Z@=&yy?(I~*n% zK1&Z8{ED66hQ(H@?FaPQw6&$ZmsF0E%PJ74o{j=?&)V|$&ec|zP|pHZsX&ri91 zqH`3{`6e%j-d@*WO_QrZXNwNUlkycU>LY1b_jM+a?b+WeUaHLGZ(rH>@Nwy?50r5= z3g>CC@^sM$^QzuhgWeG=j|>vJUz$F$O~Q#eDGuYN*9FcjHMQ}{r0W+>ryH9Y&Ngu1 zM!2KMv(4*FO=FYIKfQ{C0w1zHTMX;SU!?`-VZdPK9fG4-uDuLnM=!-9?^EJZx+Yz5c^h_XxFT zqHdmEuB~A;+7V+lbd^G9A^Y*HmwQ5vLuuezE|996trpyzD5?E{`d(3gmT!He)#57e5Da}qsp1wB zWilZw9vYEWBe(i?%LPj;>-P;UMe&0=85x1S(iv?DdTGbKm($wtoR`hzM37@B{mXKQZH$n$stUe@9sDFkm=89jvFU#cBC!w#yr0^YNV>9)n>9D&^!8Ic37n zk5SJrpVPq15yb#)0X#Cu@-UwJ#**7U2G8OhHEae3Y`IND`CKN-)SOapzvDRFC3c?b zS{UNeT{yLAx9S>bJ?R}BNonH^1B+c7+;%rHnwZ;mo_L(&vig;EEQ87)i+=jsue3|7 zKBn!^@RY6fmFRsgxBFZ$oz4?+jJ#+2EXMA*=<7LjmhpsZbvNdFA@t$pb{tw zr}~|@%B&0O-H7H+RGQuome%_VF0O3-03`ti1FgN0TlZlu$<#XubSIzj 
z?(M3GaK=l+w^!gNz4|ba=Ln|SA8c<=a9O<2!>$zP+3F_fUJ|bmoYy(6_P*4gG#G}k z64*HoclLb5IbL6&I2Jvm8@|{N-&vQGiN-Oqs|BN{fjf={1%Nucib4xWo#c;JALW$t zZ+|Z>*qs=x{fN_z4Q;Z4bOz-f)gU9|9?%8BiJ^L^%Z1!%Nf$`Dew2cZK0ZL=)mvE&UpZ@%^>66# zS+0|UI$Dk@4?s}`&H^@H0UgP(uH)n`Lj_?n-y)gto9fKR&_Ep!qn21bFP&|R-BFSx zE*P0c?s3PbXUI#@eTR7q4BCG;72pWIrkQf4Blfz;05MIzg*)9ZsT>+~l3_9lGRJEv z#@b8HHJA(X&&uzKWi*>Ieo_vNsYT^|#LqR09d*xGQq^nqZkTu8ld-z=0JDeuqGQ9w zIg2|PKU^?}^Ug0|soo%(@@Z?Ar{KfA^@2weEI{yUbK*-nSDAh6*7Q7N@b*Uf`R0WOjQ0LJn?B11kR5%ZfW!bBSt%SRq}ZZ))Nt+# zRa81Uxes&7K6ZMrmob$sDhmT)J(}Q&oiz=89$ovsTN_LUnai0=&19HSUwDfv6Kg1q zOw5aPT8?$!`9w3!u38GrY^+UQMEQakCw4Hf;N9Sr*$3@E;V{EtTW@MZF)&&;s@-1U zsNLRlXS36cbnZ`!YD5;w+&=U={b>wtC$#0wmfDDK&ZckM#_VbQ9bPa76#PH`TwJfv z4XqZ|0Qm(Se`S zv2mn$beTT(d3o+fpq|=wV#3+qZ-Xo45WzYi^VE3Ih}+eE0NflF(>r=nE;k_unw3{u z6{-E{if)*0T*?lAI&Pw;-CV5Vh~%Em%^NDVawXN9=q0rkw0!q`4JVT%*cAsg^Ijim zEuGoA=R?MLeIcJ}5H#==bkK_*(`1lbref0pwOVmzR6fX!n)@xJUn;SbuMyi#=1_k0Y6_}Cro#3c#u)roqn-_vM|!~)_XXH;a5caT zz<(ZhWE=24i!@6)vux%?Ha5VZmdlIV_DhG!3V}S~amkz-Wk+kZsgz$;C;h+bZ+n-K z(3^k!b|xpakJ1{hX7Pd?TO?N6!0k$x<)9R(30a)aiL&w-QOT8ZiZ?GzUXf3^2!ZKw zmB+L1`xk$c^VGP%Y6Xn=QodRR?q++}o!g(t5oXN$Kp%~?dU*2YU@QR%X=W0x_sZF0 zF`|95@pb;zTHua)EJ0vf5VO~VLa=0rlv6G)`25g$B%Jl4-f3HF_<}2pi}_nHGfmj| zUmfMg^%tLy-#y8-PYKd6Z-CVnWd)puYDtge^L5!vlFn7w zlH?-2Z1(%uX%)`X<_g|d8O_w-g460FE^0kBA!A@9WTk%iJeqW5;x*|`3f1pccPhF~_b9}E$64xJ50%q&ZURPI8qQlNe+DTVN05x!D1tFv&!mpg)geB)O(DITM%gSNNjSgZ|XArR`~Y_bE%7oR>?i*7Lcj7uUnY*7}>3yfyW=x$gij$?}L zY+%Tla(6OL+p`b?ZprO?)`OwSw$7avQB1#%f5U(jUJPL{_6N3m6!r{~?H@MoQ>I3- z(q;H}rm!u6_%3(us3v=3%KGpUpLZ+v^=Z9eO90WXDqA5(j(7ADhkgU3AXHu+>E zCCiy~-@ey|(!urtF5O|6mw0e2d~?BO{m6XqS_BrF7nqj}{b10yXya_glbzi_v3<@* z)L`LA{+YN}(%TZ@XgaZh#rjOBq3*>&rBr6)d&{x_gyT}~U;=JkNb;iK+jaG=umPYU zgE=;oCjUOj3i$Wa?i(=I+N<&HvG=NOG@ct>D!2UTGxIpI))ej%bZS*KHPl!VJw6#2 z9jl>6ojm2a90`ucG;%QMcbVfpA7xif)k`BKk*HzBqw!!SO-OQkq#jJ|3QFBl=ZzV^ zZcAPgh)n4pdq6z7w$oV1=gs<^XX32@Dr#C8_uiM@U9S9&fG^&Ktohxuw~7-gA(z?M zOksbq(Z`Mi#R=FJn+TdC%28Oqbfd?@dndv7@7#{_cbxV3=#kB}(^!~Oe8^>U?5A&b 
zzkX*X0$_Eqfca%-9W1!sb}-S^O(&FxYp+k5ak(^8Uqy3Zf6cZPj*~ z1MNMZj+*)PdMU;6eC3Rsql6mY+?M30F}-@`55vPYXT7VZ{m1LB>Oov7Cp@=awtxiL zm^M0?CNxpxHFUw;PlqGW*{wzXe%cW^*U`kzk^%&=XX<}JC$F6NWsFvJzwo-vM}97k zT;97)72Im!w;I-JzBIWl`&01ECp-Vo_7ABIxxGZ-!*^?$S--TLc1mn>;6cye-J4Bu z*rvMrdG}!*utTiKP$7x?`|cK6;n8u2&&*6`8v=+f@_ywb39tS0yXQW;6#7G__s&M) z1dYTsV5=iUCB1V(WoXU5m%ygVkxBD#W=4ScjaW$Jp|XD@I7>-ObX zqB=46G*d74w(cK@-Oy;Z+wN-y$ZQ%@O=AJU$+(-rD+gujm3>Vj7T=s_etb1uN8ph(EFE4MIJ0! z;fCK%HR|i~rf6xaGnP3afo4JgJHuUX3|RxpcxUUD$Ioqs(XJDVER|J&br-? zClp@;a?W}RIBqX}(DYH}S?;|!q}_i7EK^jij=i~Jxq$2j5lvvR4tX`lueUe|$W*Aq zGro~_avL^rbaTIcD|4gPLSDM2v>Be!;*9Necu%4|a*5K5h~E#3EA#R$xwu_aE2lJ| z2b6s0Ni>Ay+a%%B&8ux{-)CevXe}?VOPa2;@}@=ddUqY-iYI0?*pV-hoRQl)4TKM4 zC0SYd>JUQGX8J$nZm-uCr)eQDju&%+8eA{~&Ie4I zlWy3FT%6i@9k-`}EqHtNgoD&Za8#)>_`+xKiYI9i!|;7o*GRzg9fG^N!{BUl?|pXnFP!n({6U(~UqB!vL1Zq?fAhfY=oWH+m( z=NtPkf5(xCFL9ExVIO8R{Dh4e zXxqaJqcGA7_;*rANJ=d*se__(Bd1TZLDZIuinTdNYXY|2dmEhm%EOwo6BSfJUR%ua z(@v3&&(}X0Q!y*=t2UhyuRk?8eM+S7xEhBPa3?XjWP**lT{VPtJ~J#k=50iPOhowv z$Xsf|jNk;INj1+pZgz3CZC?j!vxt{3uS{Z3oNr((Gn+hah>Ql5>!j|Mo^+xeB>WL3 zyRQMyHRlndq1r#tFTP)lW#D#~#lp}`jjy>zv`F(dazP1b0{*X>S1CsEwx|EJ>$&*7 z@h^<&^yJ%k#TZ`lKgS1A0x$MHCQFz7nnh!N-?7T7e+X3Cf1gh&>#bp!_UIZR0EZlb zDQ3=h8++GG8IRgZJvzpvh2OfAfPdfHun>u7JT?c zqF1_BwIK=NcjXON2T2uw#irK6#~G#(bCRzv!A0pviwF;vh`2UW<`pnHlZVV>2`AfW zd;FoCk9y@T9l+H(?38JOb-dcGvtc&lq?b}X2C4$$STumUn6+-O2>4WHf$oL)u1d>! 
z=cbV>Dz_mLtl8e`Qm0C#c%6F9a@1EOkN!KVI1=4U<}K_wFvU20VvTCGWzUo9ePk{E z&zNxjX}8(ZYdPnWnCz$wr%@gejEk-;vAgZ-x0H_AMAmiec~%0YSPv8Ds>sE3mH8|{ z+Zp8&x;#V3@|*`rDXqDz!eo%qMm|!^K!?_kGw73B`bE$GtN8j}06PO!mYv`JC931B zZ4#`2Pq&*`Uw;+#9-n7Y=uZCyCx}Pf;rAPZrFa0YH)KIn0iQTB{I{n$|GScT)VTgT zi^XGS7Gqw5CO)=4&~SKcCg1m@gU%sjhOjrL;2}(D(dgtp9yGGS0$`7N+Xd?pJmjE9 zG&XNb>^i5!l>GtspG@w{O%-Q#8&FX^aS&2Y_MPyhddFpI_8TXd`|dgkIuYsPy~H{Y zeBb^$n$5A_UM1A_^eh{I0x#KV1ASqz2NP2w`d09Z(@t2*N%S#l={W6)CC*t&Zg`3m z;Zkx>!(25#I6l&&GUw#VJaY@o(B7Y4b~x}Mu=^2XO+$RI4G}cV*tMW`Ckv_HwLd=z zRQcNa(N;Qjxe=Hw!S^(bE)hJXazzlMZGjIC)z&Znljg`a3EcLiLcI;Q>D39q(!Yq2 zd~Z~y;OG_JQU|T zISnvLQIFF?*B^Z%`c@qb4TSkE*mA|GGbc zXsj-MTSz94O*Q@pf(-2cNY>d$v1Pwq_@7Pm@yk229fLP{UngQBMqh2zw?bO@UUVsy z-(2S{V<6h7?>>l`pYIWKjU)jQ6*7@%4lD>hCJft`&Gg4s|G)c|u!a+H5IODB>XkTp zGuEzcB^GdG0X+ZJc#4v_Da!2i|IZbC z9=L*E=MK!0rXECdVpNV0fuxyIw^(7n{vkRNVN0MxCh6*J$j|Mj!|{8x&S=D^`#nbdMSjz6*}ql|Q78&~#;h#2MG3%8 zxrOKUpQy9Fb4YMxQ4dk5c)uH=lB4J67T!y>1V|sBndhNFBpETvdE^pKo46&;B~y{U zalq(Q5~%z&uz$B>!FN#tmC7TDP(ng zXo#f?pnhq_Wn+t$*!cWcr_4`9ko)k(RlgdN=jbK)TyQq}C2%5od?qP)6N8m&5W$^|3jrW)#VfF@~7(2hwSA^dMyv0IaNJIQK|c?*SQTd1!gudaKi=N+sN0H@UN9J*GG>pn|zz3zW~_{Z~JRR(;ei%;Km(xFSM z1HXLj-FeD5ePmx`FbfvBjr|l$D0N`Z}@E(5L@YfeoTxn0OZGDJvz+i}%3PlCV$KFb)S375* z1)XS7?<5ogmm`Q}T+k5oN40%(9$xi9fpodQNfb2VUS8b@s30gaNppTjd7*eE-S){N zf2uP@VRoW1O7#k*$`lH#XyS>f7XILI_z9v|?=*DVm`FbO9E=1LLlz_;$3q+!vhzwQ zB>3wNEvOVPhaf9YqOibWh@lLqN{D#G{p2Uhm&-Bg)K zJ=bR!2`bcf7Ng3ggfnRV7J~l7L-UGfnEz~0-r`L?eh>jlfL6_N64Q4O9hQdkdbc!$ zEzHm8Y;tiwGk&4-x~CdHR(gVgPP5qJr3df#_rD1X+jmuRn4G0WJWL+}I@a&!iAdca zkpcBW4fK~~Je39Lz?LF)My>%(F8mr%Pe3yS1M7C{3Gi;ok0%3woog897^BA_%}lah zp4gni2uG*6QM(ZgEmS?eblo&p(wPkWJA$Ax!rwh`SFv$;&m{SfEkG<(v&E(IFrnz# z@0Mn&{^x6}_rcMIb$|$8!D+zzJ#D49b<9|rvY9ZM2^gh!@rM?fqN@9U$j9)V)H;?{kcmVS)jR_@m!P|tvetFJ#wlOnsfW^@LUVV*#HNljLGcnp*PrW{0QJUNJsp;n`CzX>VW0-Qh9>% z#^(-n#|k~`xIa*I#jk4nqd67K6+~2DPEKo$pDW$Zg*LnDx?aypbjKLU>Je=iQ*iVt zqEMeEXg&vpPbKBPbzG}Pmt<3bA#;xO-#7WL_SXgGa`$?Mn88HGTD 
zj^e+{r8EvuXUXtTb{q!4(=|3JJj9%vA+sao6Y%?o;>`QQHc7K{gcu74)*sqhSgsqq z2&M0-u`0E($lZNOf4JjA4BWprlv7D7IJRMKn zLQb^TrxMXeuogfy0M;;S;0fWQ5fV+T@LQvv*mmj`l#yFQoWMbW^i!oi^Xl&qEQF1B z7JV8rd3SiylMtD`H$d(9`9=?lnrh!1s63DL`LV|1GvWsy%(8#G)M$0){#gXS#j`7N z^^czc*g%54qSL3z085Vh){W}8H9}LzD}DgH9{>-$^C`YU4ViblMMgjyg%cB+Zm|#w zTDhU+e8eu-Rb4TyqDdlOVEuELZG++_@3z4jM?6Ci!xOI}4l~T7)~T``fZ`@$d-ow! z=(23-yD35M{hONy#ve#-=LALz3M%vu216yK68fjO;j_T2-DWOC!QoS90dWXdm#*gD zOs__ep&LE|FTubiySS-~OOut&v+f-kWHmVrn9Jt@)@ zmqIYWcT+D8EL#(`KE^!)vH%+=mA1Ld6ArF`wql{`GHnds!H<;pTcrUgIxExLJ_r9O zAlE(4Xc!ux8(3oys;a4XT^}LAThfFIJ$vhxZ-rfQHo|}F0hzt}QnIJm!s#5DJh3dG zq3ewj_jn~L~9`dQ=25M0Pn-5~M<0 zP9J=a3p+luZdFn^wREzcc1M8*qpRO7ee?=*8d#)blL0dX0xDkLBawx6g^O>~>&xm* z{h>q4g8nE#l@KU+Cnu?l_q+gW8P!u>IU#<&lH~lR!s1Y8Ipu{x^6!QW<@QKRi8t{B zuael!9sI+4dv5gz$IDJz;-WoN{ls2NV{Zi=aw?N?wK10m0lCw06w;ly7J^uvEN(LD zBzN&GsRQQY0mJUTW*~P;E>6^+k2U(keEJ6=-!ZN9TNIJN)5+}Dn{nf^duXd|4D9Ai zU(4gISnV}{NKLbzQW_&uj7L;-xk^eBV$R%ju4Pe=ZSXIeof|w&So{wC;+v z?z&}Z_A%2jnX-{K8Hns1veig5bOE;1}MXokgqq#m) zc_MYPZzua^lwvpx0QvJDj7w|rDJoryuGVW;2vBgJ^I7>{l(ktYS&}) z|BWvXi>5HChvYWhF5F`#Xm@<+T<_b~#(BmH_evis-PPryU2;tP?i!@e(|ZDLgK|m3 za@M`E_u6&uh7)mDpG5vOkjOf5RRa{yPp@jvt>rxSoZW*13w^H9U^taPUl6QZxlu-4 z`7%~Evv*oLaR!{0QLA~*K-Rn>3entZ!QHw&zb5))Lo6Pl@7DgEHKJto!`-2b!R7M8 zhu@#kn=*A48S>=95}v*b-Yeh$l} zVp?jK6Yty%pjx8HhN+cp4orhSv@_fKx9vo346uG4G`X|B^x<@s0|cPt)^6|sb@MyG z^G(WUDemc2x$(z790ZT?OMeT362thb*`^T2U$eU>BvYpV5ukbfZAHynD=vTS7}EYc zCLyem=YvKmzbql|>6N_^AJgFlSY1T`5;#^Y5V*Szoc_6j9k$QY=g;2d8r_6T!mQwh zPuFfJnxvdkk-y&Ba51ahz+;&x5L#tV&g|LXB^UYsypK%43cuYb0v2Hb0Ze*})=?3DB>l;{4gEMwUHH?iklu!%^42b}RA1Y$CE3~EK4s6Z5oHcXddTG#{_#OX?I zM#$+q8Uk0ju|6B61!0J=4X*?BzZlDP7EB6}^hrFm+5^AZBVp!tOz#%N_!T0xx^1$( zh}!$&prtFm-LXKPTDefK;#>uVwn2xsGjaDE@6stM^Swx1Kfk1B5uc|c@%U7kQsi9^ zG)H^B?X;vRfg9(qa1NrZAmDRn#Px@An&;Uyku#^E#$Y@_v}-WVn8oFYEU&<6EavXZ;HktpQ43m8`Trr*)sW(|eKxR=gP=-^gA7Ur!ZJO`0&N+Dq`n z@u_Xc<(RKkAtc@)1}L^`kK~#A9+?KLRUmSiH|9A6qx821U5Q@WNS&+reA+M+B!oiB zy?hN5HEKDM50Xnw 
zV|PpQ7p}e>S7*axOTZhnsh)N2+Y^j7n!MW`j#As>4q0FsLNnDML{wN^*;aHpFysmt zeCs7h_E`8z4aWVOzCf;WkG7-arn<8Z2U=y~N6{x_;=29x2y?!B&XvGc_k8EvuZzLGSh;Px zB#?e448YVz`w!5XN1b9?${|$?R5!ibq;N3Xz>+u+dO^AdRq)1ddLezqc&w= z4GHuE<*R`f@q1Z66#A2M{Amz%h?Ym!RiTtIMF<)TQEs)pqIl#mDi;J#RGj!?q{s=d zVE7oL41jq#U~`Z5c+M&N6EA9i@&H~Wg-G5v`~Ku^16L_?C}1pTDeB9|tucvqU|rC) zT1)Uskb?UXiakya8-wv})XFvI2*Zq{8FTkEIn4OgO~@vO8t_Xn8QkF~36KG=<$^s5 zEYqe`L&5qx2Zq|Xc?im3@WHua9N)S9_*{ii@Z(-R3B{ZJtH)M~HWh*{+HymU9Ar^$f97A%cCx zB3>54N3juF`)R@)Fq5u7e=f0SN56X5{ON*=lDv`}g!gPRt2-c685MS*>7j7eyVz}+ zFrZQ^_6)xiCCN8wB4H>Z8P{idMxnl$Qb{QAZdbqgrE8VVqt&|`uX@R~1BHd?d|5#R znc+;}beP))erDu^_2UM?^}jvT5+c|yoda~` zJld^IzSgyJ68=a74Q|7v-)imhkf;ErxbvdUb7Aon&FSF=-->75jh{qV3ZSQJ{`?Ce z{wBNL-SU)~Sau^uSZ)fHNNkNfq-jGC75IcPM8E5zWR5r;)?mo?)02GUArue0k&fTn6Umfny z`)e+T*fBfCDo<~hD=+_P?GgHIU8i#uLD9cu&9M&QLASNO0zewzyhPpcJ+TmmD zDd-z7QA=!0EZ04_D#({{e33`DU(VTvM|6Cfbdw(}odcx>MvZH3FRJ9McdhxQ4{%bdsTQlYD-TRE!m!qi8hFba+4rq6qtVn$vHNbK z9_PC!b_LvEJP}FCp`yQa`54uHGR%A;;11uUkSru<6PS-6E`b1oYUO@NVyI?oab<#K;usqO*mI2y^&lTr_&o|exCtcl*J$Y}Ymta0Q8iS~9 zqrC8mKR>~4_;+7uUK<14lRv2hauLmwROCd*mn3?)xA1N0_Y8eDI3-hefFUkmUbK?i zbvFy0n&9smrcb**(P+|IWk5JM!Cs-##^kiD!;%QNS`hb+ao)8Su~xwkA=8FYdk7V~ zL+5cz4{jwv4U1(hBQKFD0e%qtR6M^{Q>~FjR@soa0KF$z5#|35slAZZLqr`@41DmX zVp-oyH}2aqPB#N!7Z3%>;ZTa;OughJp#lo0GxKpYc1A4gy9pcr^EAA9ecube){anyx~Hr6&9~t#(0?Bgc}&Gcs&og*@@~j^NhK-(tVTaO?ZWE?+R-S>qcqRq2bN zCUiWm@|$l^hK%ZcGHm=39Jg95nCHyvZKgnWD;D=nS_2-rTtbSB&bE-0B5iUVJ3ToR`Sb1YMRZ)y_h|{^e^=d<2ONuUF;MC|*Y3 zNKA`;P$E!=l0Bv2@ZBlT>EN~spx}=#bUHP0HcU|`n=_bSG%OOx=LAr!73TQRUmv_3%j_@vfNJKs6Svf=!N<&Dm(0ZoOc)J=-N+Q2 zYD2_duYU=~uiYi`yF*CemR)T_KZQiC#ei0?R$I8?r(BKvqBY-y4@GPm9{dHxyaJ@P zC!EOd_;H$9?7&}z__QaLAyX{r<)*@++lo@Y>_Mpq`z%=R^~dx=Hl*Cv>;2py+U`Th z0-ri?>dF;BUy13ZPVt+w-*I(Cg#GOm6~= zMqfx=DPh&GI;D=jvwSSNGKq~%tFhsIvDL7P0qY(eC zC&PwDvwKGtBO8Q~RY8nkg_Hiy*3|RI88h~i7f$$> z_&Fj0=}%dCuPg#p^}~@expcJIx#V#6Yvf7KjnzF46;7dOF#Ng=9;7d;_gsm`x_*Gq zeiK+lZ~fiYMDqB)AikKtwTO32aYXHuw2eFTKt6OZptK_Dcto 
zthX>APDBgwwjXfDE{`lOE|l7U-$g(uK*@BS<8FVZ5ErU<8r6QWxsL%%{*R84?fy>! zc^ADA07spZYxc~He~$`hyzyC(e@{M>6M0BesIS2U`1aMIg2e)K&mWt4)=PCoTGdp7 zFL%Z>#VRmwP347|-l5}ykBGX>&T3@68X~$`d0gH#RX&EIf1UHusy)%mG6VrfX?sx( ze)mRdwK4M04O7zjk`=YFOIHd~rD}uT2X?PVc{%gj56ETu$Me6z4bO&iC*8z0gyI{i z8vWgAM}@lsAwlL0#GhZJLgMHN!(j-Qf~|KHIvq%MC9=Mr#RDa&Oa(K&m!pmqR->7B zH7*>WCsL-WS3AiraB3yJ#}Uvp{D(v^{gO8EL>~)V8dl9WC#+^%kGG_$Zii<74eKki z0dQH~gJpI;I&Er?Srqfi8z2Q)psbQVF)E#3H33-^hv(Quo$}O|RwLVQe@tpc^0N~Z zOZ8bOHFC8~)}Ccc6X zqWnx?!mo`BSf4{)mR!ehH@B~$i8CBQWdDlq_<*=Tq8R`S^8YrRxbX~LlLY927Y|3} z{UP!3jEYm9mOml4f{%LZ#F?L?^zLkzBy{*(`YOM#Z9rxgbG_vx7reg-z{l6DEk$>7 z2LYPKfH7=-((i?d7ij$5S)~3-wKl#VHiVz+qMWNf6K#j=*agu!IY*`O`Pt+57cYL8 zc4lkzPQ)!oBV;*U+m5gcSsgdpUe;&69M@OOS*bqUuw*+WpSCt<8{6oVN6A@Cw%Bhi zt;bW-sdim6Qo9>t&p<@W@c6b_bO_{d6<5ruO`yn&4V!Q*zuYBqmT7P?$k%k2>O*jQ zwTtw+t~i+@081v773k_mL#)RnTt%uuUbb^HE7Q8d<|Jqv@3|+AzmKVt$epKNkxN^5 zqCowwTDmMOT&{k@#r;|fNygrc2q1A*RY`GeXD8{mk`|@5v{JAPVcfp)9Wna4osIBJ}82|>y2d%7g&1G)ICpil&V1EKi zp#Naf_H2F9SI64yt^NxpxWphS`23^-<>Dj@MXBPi=q*=(r?FWb177_%ZWm9!RiAG@ zR`)K$){rPw2wJyOJtAhIARq?(CB&EVAKTthFxbPw*S1y5mAG7kYn9w7qgHdHCZs~n z!uVt|nnRS?`ikB2s-LTfH|IAm@vWG%!nT00%YeU0S-ZZ?sco$u>Y^?joMuT^cbF_Oa8;Qsej&xW0pNijLo;%7ibLE~hun|LYy3|gT}Gx1sv z#+R(8+AAhiLS1N{rL_>mKWsf76|5ExrsViUK$)i_2LSI1LdtA25>+jB(mdD1BKWP2 zi=FiF62wp@phFy4X6sHESw537pWRK=NP?aqq2BeZ1YUdTo6UTr)9mxQ;?^lMJ-y{o zs?N2wH{!76e_Q8opB4$s zI#1U4kDTGY24#J6E8R0TG>)U0noMLCWYhU$nCB4euT9VE_mG7zA@3IX7oo(kksH3C-teB z4XlBL0=f*flU4u99VZYj(jfb`znJZ6v+j6tyJz9F`D@%JQmFnV1kx0_2ZNs$$y4b^e#AuI~%Ai2O0nXZGp_RKW&iQD2E~YEZg!i)tc>nF1K6JktmH&O-oaG@E_bD zAQNwRI@XxnV4icB><19XtJ{+CfW->OwVu02=r+AhV z0Q>iguEDfPv!d~H*4>dJUgQd7#PrsULRGf*((n1(sMp3$r>Xr?>sJ^24-7|9?(N=w z6{~^vF2T*wf4-}KeP#K0`h0H#GIwV>Y)Qh63}EXs$fz?!juw?eD>fRJXKfZQ*qimyr(F@LY=fq85;JBOlXsspqPbb< zb%&h>&|{zzBsJuj!2)`G4J^pBcR&U0#O+xfPP<{t(5p$PxQzd#V&u9Iz^sYKQ6=-% zZg@Fu$deCsGcRLO;1`#xF*nQ^mGzAkBola30f27gbwH#nVXyzvJ}c7PzErF_4#06} zC&!XLA5VD%zE#)Ub8kZH*wsJ43T@-b14?ODUnE9zc+h$w)@8ib9ZH 
zuk!g$c$8;@S3Sxhl*NUg$A$Jz+L%*DOia0&Q&+w+uH7e($Y*k$tz6k44Ex2C(teLJ zhoT5(NUH7e%SEu;dBN2FD0@f7K)vzUtz_;_xxJq$8ZF{Cj9H+Dx`5$j!_NX2N_#;s zbHo}5m2Wco3l- zB;h|zqzRNkwv&M2L}IO#^YcWC%@sGJJr-)4bavVkFF5%+OBx&B2xFSP(UPh7b)MR_ zL_@x1`XhEI$|GphFU;8RZQml+-B1i;)&O;dIH7 z^wc`U+IBf&BKAmET7KxmBh1n==;U+7(MdJBQ=2z}hV54 zcaJo@Wi(&P%_1P@Q060J?E<#H&TeR#Rpr605?q>$n36sc5=RMKxSiw5k5m)I3u8}&-CwM!~$jMo(VA}=MsO7K)bLgiEvSN473(Nxlx1=`d0|3(+6^_}fB{i?7kecM+5hT(>73Db5c;bJqW%5l1l^LxBBc;Jha`3=4yQryWV4FLa9v%=v0SQ-zxRH*vqaPQIlS-;q~ zgDDjmgjliByz1GOEI<%(1ZehjYq?3c5uM*K(qpr6ZfC3_G z67*busGsjZlwx2YGv%E|e`uDO&Ci~PzrxeRlbaJ76IOuIz|WGI8J6H4Udq`in+aZz6a>+sK|FIl;Y?Zbdjo> zxWpaYP|cOpRGS~_D9NEx+pCdeacYvyNa2Q(Fxo*JKhh#nVo&^W&y(R4&Ily3PKn*T ziMU8j+v7GX^p!-l_8hl<*5pkj$RbTU7=;G+QqY7xfitJxf9wfZXDwli%Ol`(Mudbx z7Hf7sVK(S^N3Yj{qfxF`d$Oa+|5;JfAc}%=6ToOheEfQ{&K-S#WNsk5tUgb?$8k=HdgQug&|1Ksgto$TiGqRi8G#f-*< z^E`IfjDUr~kQX+$harT`B2K`@?TmwIj%NCS-=u4rFBom$rLs2^Yu@X7y_rj;mJNC+ z&Y=b@{`=A8B}oR_a5kmxnzZCiL+Kf)K(T;;?KPiQ%;;^gGE?6^QqiC+(O~49Urzw^ z+$%IoYQfc;T;?-l;Py%!x%3`IB#a;fI*zctI`SHSIHP4V?Ar0(BR{bti9Na?7v%1z z95hfJVJqjR0tt`OqEnC!?)ho7%cPyO=yBuj-D8+qjNP?Pyd#zQH{r`ZtoTFhpAj}c zcP`@{)LGNp zf~QQV#im*N5uMskgl11}s>=`EAF{F&sZu8cJr=`6$q2FQf$fOJ>(wdd+fVa2kJ2;J z&BX`X)3Yk^nClK?l2(A#ksW zfsB+o70L6W1*<7g()UJ}*krk?6HO$-%Hg5SurdV|a(;R2`l~w8@c}l{As5ROJfyfA z%^F}*y4cNfEH@Q7`y>hrKM}4n7!;!Q%bj7rPt3ak3w1vkwCjG7fcqlw!$|G{b!Y4D zV#~-Qcay=-VSVv^&6h=ZVx(88qiL0bb6 z^d{3;?yAVNMD}lx}@i0QEC5JGVv&oE8{WX zXUhJlRQuDSNT7uCw_LfOgUT>me)0u^6WS)5dhTMW1?(4mijHq|-&Du0#}Y`g)D@*= zU>P0LL!$@SYbdP4Sw`6SQ5=wl0Z!WZS=>S3Nl5df zw7ImQ)o_p_egJhx&=o$a8>4c#D8x`zyEdxpGX<{F2`{IgNJY6}rm=^70cCG|4_(Nw ztO+BLuz`Zlsi+_jg)$;rE3;C)jc$xr32<-2;df=eJz1h22+{jx=Y_7dy>09X#RG)4v4nP$wZw z{P`J5vxRe)_BRVUj_n>QsW@DTcA8Ja?|p)5-A4}9?En%Ux7OFAoGy>l1Z$|2QlWN1 z!OZhU652!S^!F<#Qp!@Emr-L;AHwi|exa%Qlq*pA=+yvgE)ACk`Z*OR6Xq;QHf;Yh z?%ThUm7;Tfo?NV@pml!^4a!PR1gW5x07sT=8q#*936qqR)Gt~py0pe>0UdbCVIBnA zE;kr14L7)kW!Wiith3smiGlF6VF#Ni9T~BDXGtB-H%Q_&1F8?xc=ALjV-e>+K)H~* 
zMQwMTls&~?`rQF=P|^6!PT|QIvQrqT6-o|0WDmH%CTf`QENj&a@ zl68Vin}39D*W)IMw^Ge9CymSz!sl5;jx`)@J~v~b7Vwm_|3P0Yw~FE@ocqa(UHV7Z zNNU1;z{Q~aSr%cuwf!{2+iI#!`p)&ZpJh!HLb6HkAllJRJ|=m6`XV#O%2n%$84Lqe zj?g!SAuGNKF-v|>Re&(0z{FHahbzLiPc)2M(d7~Lje*(l42oW$QPmU19jDSVfCNL! z-4qQ<@=UXg#M~v2`MWG_bd9!hiMaHA2K^sQn&J247(vj!bHt;uIj0&k>%5r4F?I{F zm+h~UnYv0K(52f2ncY4rC|~m6Jibydf}VO`efq{5^;8;E=` zrpQx^?@7y~(Bs9T<2yopH60v6l6wotb}hSH##}N$*=bP;46zMw4 z$9RUCNNKW4c3?c&BcO3>a5~v&R^OD)(b_UrnI`-&ah>yO9Dy-Q7t_P`0pxJm+rt#? z^VR$9&and@>m=)!mmWuE-k&Em#S-!z{YPHrTEeB*H{>yTDH*@&ynU@n3ies{u5YsN zgM%vHmU%p|e*S#N>-3IJ186nBOSS#6JL(IEQA*JZ0=^J|_uxeh@Mn?A_M7k)_WIPVOMlJD1*;fE^F5K$+=NuJ-p4|(o+f`gXAm_(IqR!?4$7a5;F)9k5E zMaaxJPPhd_cnbc)RiINnXv!0fB;rvtwIDohshD=Ixr_4%C3D(;ct666q>hpesp7Ls1hKhIt7*s<9cij+RP1G4_^2_X=+SdDo~BJ_sY#wiho-5?h=QBS+!xo zl;!0Il=r|bX%R;=mLS1PHb@#nHsBnP-yi65*E{nL}2 zH^(wvtW3d#(#%kSbF006QIgm_J*^maW4;Ql>ICSZ(Qs;0?>HCPNS$Yg?A_~MIE~g6 zW6OfZd|#g62BjYp{+!<@cMVLx>b8KM$dp}_7P&H`@bENZ5e24cw@;DcGZjCiYkco2 z6|J^A1{?dOO`sc%Lu{5xIfx|XBY)(}GIAAX;Zx{6s`V|0b3oUgW%7X;lN@>VJ8$4w?CZmly1j!F<7bEN z;y=YURGgIa)g$Z9xnNR=gHw7Sx)VX>Jfu7+>~LoDs6b7&jJ$F|xU`~jUPWxY=RI%= z-v&62?w>V*>DRgC(zIDQWcR>&Qs2z`lzE$OBWK6#jPr}0%fv#Z~mgt<&#!Dd8&N9Oj#OozB9uVLq~%d z>nww(%X3a$!SL~f+d%c*2W$KQuen>Eos!n8A$d=sSC3$=5(+WYs~>Ypf-U?XZ1Kab zvFk%*jxQ*kJ}&v#3kBH_gm-ldi5!keM~9D#kBL}z?)^<*c_$ssO2eavGiwcd3eQnx+Ces&s9@>r{_ga!BAz5c#xd>tJJU%QQ2^jl)@Sl{fnF7m1 zVL!BxEYDs>4uplGVNSZJ=$8t#s%wgDK6n=ANrYKXG-722q4~xBm|7nO*tI_a7;P6({_o= za)nPrD_D}bq8#m|FK*WPwoJLiWG#7<^7q1MKTN7O#ktmDU7-RkKIx$?fHiI?OXDk( z;d^E})6=xhfpl^uW9JCXYIW%r)^$wP$f9uX)W;B0DF-78?c~CM?YwVdvYs<+@3fk~ zVr^4|+~O8@!iA1u45I9RiKaK>PZF@N`dIVNb4DPINx2 z{psPF1ug`uSOs#S>OF@goqxCUQl}hC?_*9)pXp`cRI)jX;o{20RzEpAyXx0^cm#y* z{mI-)gHDL!m1Yf1bpPXn9O$UBBUZ=h&(*o z^0vcu#&jALXrJl+7KTn$f7e8(loG`hlNcv#34-R(R8mZ(nFFUnNS;kfC}y0cK#K=s zeHzw6V)g;Er!_dB`tUcMu*Lv%-O$TXk!ZwAlSF>1oO7qrr1Bp(vhyFL;~^!WG}+Sr zm_@-gQ~z|umpt&)#auXJ`_9b3DX8$2H=LLIl`%Gp#+uJzj3j?cT`Ck$H7AM?vP>^9 
zCwV@jk1Yl&^0}4hE|o1&-39KCDMAz@Xy^SU;wU+~26H7a@K9)H23jg%mfmr3^N8{7 zseIXF7UK`@*ZaOeYYZF|3`{OSug>|!i+Q5g>JFD95=*wT-sx{PT@dju>2sxwCmN`k z%>s|u{r$J}6=X(V1Q=>tRM5epu+SqVb>h#Ok`b%zj}AxU+Vc+f2=0H*0>LmXwra3X zA$f|8N?H*4e!%C1bPVTTvs-q$2&c}}nl;T|+8?+j2|_#m!e|8MleYb%?6KAlTTxPb z2D^{T_?;y|<(EJc(+nxihnCHIex+nw<`0C4I@=f&D(g>+PAlcBMux0I{Pd@>w#U&Z zkw&54{T?1l_%szMupcDR9f-tY^~fx2q_84LnSr#VWd)7)Hqoeg9ucVc$Ce|4b`2iG zzPh40c{pjsLsNY2Ct z>x+;?lF9+`F*Lpf36CAsAXHDK6kQ)mJTx~_t4o`ZT^H7zC(B_>OI@UZgd_IDtgZ;~ z1FWHl?UEGyojJM|@NFT#B6Bsa_<6q`?xCa0w@j%J54PKzWEkP&#ug9qw)i8w-VY@% zXsdonT<3^`yv9ur(Mkv>jW{^6i4k_O5HOz8$|t})7IvX`aZ-itCPKRS49Z#2Y%EiU z4{zZUCmjK$!4V_AzKg(m4v*;9*nC+@b$w7(GDC!K*EY#-ep%;5*5MpAKmsr9eC8QMu(U9+9zhyXiR)$L$tJgE($ z#s9E$7Hm;=Z5LKLl^D8VfB{KSy1QGtyGyzm7&-@#lul`+yIWd9x&)*{K&kKcc|U%@ z92|S!yVg3_b$JnG(?Bo|UzJ(*=J^vI2TelTnLc3rjsbehD{36&s2Rtk{dVN$Q%K+ z4R$|A-`V=!PLoA$p*?Jb-O}U=Z6I%;Z^XUf&zWV;MXxAM4o=LQmUp|{mD{@X_&GtM zhHo)bAO-Dt=e%x!L);efnco9rrP-N?kT5w#T=0K3=H zxm_?Rd-~DhIHwN6Q6k!)yq;_#F9!#eq+5u3`oC#Xf4aY=68v+7t3{z3zAXXjlcrys z!RlZS8N*H`&x%R&U*|*JlkD{Q&0%{6HYITd`xE&ys>v~Eo+-bMy)x_2_Q(G=qJbes zQi+p7IOdUL@JomeUCt=QTFMQtjR~aB?$jISr;W^KidCRS!)N;Ic#rW@;cxgt8AmY! 
zpgIu{7>m30rL_4@vC)Jch6+?2h7}K|Z7+Q@r{zAZ33|HJ6QCJokjrqjK_O*#itRJU z#Do`kRb60|2Mf4em}PzV;b8>}M%Ap#v!r<=7o}dQQCtsCF&hN#5ms_rEkmO`%%^qq zuOr!stobPGtCgC+ubmIxv7eje66G3_<;H^Kgw7r)|lMi&jVyk-u2$N^f&E4uYr z>vr&-|Jmnc)e^SSO{$yI47YS+^hH$jdXF2lDX`hTDX`VPCE)g{2vT*KF^$)U@e3)f zcm$;<8IYSb5>o)Bq!4(2%4g-WBOF#}RWUQyW+Clm4Dg6u5F|<3p0qaV1p4w3e%1kF zB;gvY``T92LcGV}eFfGE@$6z*1divY)4YWIUwOuhGuG7UHp=`Dnj5J+jn`P29fzA& z1FlhDWv|8cR{W)*X36CwqA^B7bxx(HSI{8@ksGRZSQEZ{#|%DcVENT4@V*7RInLR1 zlLImG5C#9p@9Q^7>(c_r+kV7pf1i&I?H%9FkzTGX9ZU`lAg-`G6X{0kY?Q8zc|tfk zUc9{?3EmZ(Y%c@9yOUolRJdh}63@aW;u#>5SfB3H%6-wT9oX*cs09VX@EI}W&>wz4 zM;u7^GDmVKfHnlg94~l%X3xD?dgY=%SZhfY&)JO)GnZ?v8wLB4L;=Hkzx>6}68|DJ zhKL=*pwst52U!MJ+j{!Tr z@qGL{v4#p5T(!&?bgw87nwTH+7d&E|OrlR2N6<$pN9F1*ICH-y43>@56N4ESYmA9> zZO+!(8CPQ!dDzb+gm3R}&vq87^tV4J7M)XE81m7{+xszS7LZtrE65hjnotRP=jq>u zF#@fQiX+wXFAdw=ZFfhLQNMrr(BI^^;-H@aUqS~SQ)pD1NiVf5b11%`P{r1^Wx8;u zVI-HrepCFKtd3dy+ZEnG6Mtm5gj{3nOue=7pyW(NAUjEXX7Vm_MWcR85-fj%DOo>6 z2<9a1-yEM746@Gs+KrnMS4ykq>q4y#8Piu6B4W4Yvn_?N^+s>w$v^4o)5uGBZ^po=|bu+-23CqpD#rC-e{!6-ZVpouqThOc!WDxC<< zrrW&pgH{|4P*4J8T-y_#CmRz+>c*m;1R_+jxR4}NySCC2)@@0t(|1n*En4pS@Df{_ z>8O*VcUlGB^>*z^V(J;nI&ACmaJyxx*Fq>q6|pgoE_d8&=H>#Zm|miabAjrG@|q5&=^*aOE*$ zgd7v|=_F@cyqbJ)2-io%t*l4{wPm|#EzXI#)f2 z{tWL9j~LRy#<5cO@8H1Ghn-r0nOIEZUnBNUg|3uU8)KHDU;X?h+Uj=0%BXjTCbm`C8OxWyHvnerqJbAp z!^NR}UISixv5>0cjC16OhzRq6NZdYgvjjlW7!7}US($Vh+ShS->9N`Ka;2sOtOdpE zck2rYrufpC;iE5e#0r}#L->XBD`W~86XUTjNcRD=?{ygFVBg9pLfG>2D83}6A*`o& zkYbqQ4)gBeLmlLjZw|rbVqI$G+X_+PNxwF9Hvtk>s4uCY#z^beJ^}srPr3X-wp^1z zVPOP2gIz0cx-R;pu{JmjibRw)=sHRC3q;$nev)P@v^3Slm2EMNqpuLI5Tm2nm&uN! 
zt(22a$d3$AQD}LH!paE}tz{`Ku)dY~v9BZLu6#6|+RBP_uDvx(Fg)EPJQ@3CVpYp?(uTi_ziTBG#nw$NI3Xm58mvB&y|vz1ley zB658k{2OH)m_C)TkBuK@PFh*u@WIve^~WFgi5!aAk12W6pJj)HT$X{6rt>HIal7;1 z7$>fsQ&0Sm!i)l@kVyr!7Y^P#Uj9m3ma`ky%Tc03Uxx?s44YwL^!;2x+7tr)%q3u} zqA)L-H}Ozv)G$$0N-~(q2!HQ>@UuYVD&Omn#~3g< zl$O<_iA4QR_7Qs}J03nS-oKHa#oBnjs-|W#U%&z^5gm0so!-6ev9_&$${`B-?V)%mGPWd+;7SzfD*{m>m}87%Y%+17SFH4oR28WXJQm3(}n6gfg+?i1a`k&PU%9gVq!kls(w+9Al9m!y+>(pOQbCkTj#fhht z@>y&KgV0y7O0ZOxRr+qx3Uou&-vYM-C;NQ~OutWAZxtNCnQ{6f!IJn*1C-uJWV z$JoW=k?&e_Q?_RkeYNz1Dux|J>%XEA9HRH7lvKAZ#OfHqOoHg6sE?ygt1bC40@3vn zvu4pB@W{kmf4I?_l0laEz9!!^NMa-Le{NOS-0bm7MWi_!?wR<66dJoy<-~|!UnA6E z)h^FI@-3^U&0~q9?Tx(qSZGnScovwZ#aqq>gj0AjZgZa}fU@@XAO|-WxZwlV=~@gY z6H;1he77y^YrkgCZmi(iro0j}Y~N2LL9@36GYx5!MDB-v=(_mhpWmE58y4B!N4$=W z6}0ziP5mPulXQZtgch<5LOEB4^8l4c5MP$^UG?e#O);~#(O8eZW02M_=!^8p!w}?-E zC%>%@QeU>B?n@WK;s%GqQ}0@H5470=5e_@Tep}Y$!-9N;gX#kX9|=*D3sj52fHh zEv^n!RON+?I2d&hg>vcR0g^cyeD;4{=b&f3Hi>`MyWSzVkLus{SlO8+Xi=>8w4cfo z+G~7}@#q3rN>80Qxv$3uI&Lt$(E!|YbiF4z2goHR7YV_Ct$#E_8kK9y zbBA!35D%SDM(i}lV|u+&i8_`+WJIBLF`yQv`ppM~nBox-F{x#0$VZkW{%D2zw}^M3 zyghk4ir!6ta+O@fBG9V1YJPb-S_K}=dGiScfDAPLF2Ms3YfWSYGu=VXBvKa zo}XHzm}N!J6bQFk0zvf|3^GT-@>(4D zw$2<=;=g9iy2hUy-HaN$|GQ zPjP};&irlgalqHYkuqAFua(%IMn4X3bn3cu1&68xv|av=;U4ogfBTZh{AileHyK#p zh;8tbpOK~E0^4tc!XCd-cUih6Xa-vm02GwY^tJkL3|EfW3%iVEhQCtCoMoaHKEm29 z=Q85%>@-P=Y_l*%f6#yL_7oy^AesOT#hUU^t%DXeh-q1w+;7bkF$`Ph&+EkFP8lGv zS$T{&LgB=xJ=4ewx8~{)Gk|Typb}#}wM9DRYPInbS|Fl4zKcx5oMu>RX* zaX_zdl=YR-^rz{pV|XDe8WU@*(gdody&)lpNMdGv&)>tOF!A`DtjQF~zqyR=PuOc0 z{;K?PP~U|4lQIKPxlHSDQ_OW)4pKLuvl}|+>gIcTrSNiMjFve4=?2W0{eGc+cmNBYOw?y2IvPdKX|aMMrW_b_-A>JT$^ubv ztM^&-$I_q1ZXDLef6RBe*qUf@7=K{c^Exboad(-XwVRXL!&7KhA|eA-Rx!^M*1rlZ zb=mtpTF&1d9e8IxiDCXX0mCrIviHTz^rN;5#njCJ z?1O^=7IH<-IDd`{06JdyAb7IK)18DjRvbB>Iyk7{RKG%tH3Vo1n{Pdplg{Iu!^pq&i|U=#%ywrqo-y(KlS&^1Z`dqhU(M_A#~CO zN~OLWys#@Js;m~0-hRC_^wrsW(OtUf->tz2mu)|j)co95>;YufeB`K)SD{Kg073l57>n0dPLxdB4ac) z6s^w5SxVl!`W?qgETqSjkWEPhY6uc45+B4Rc+(``>lqJT1QahnDETbU7 
zr+&|?n?alV^VE0!H(_J%Qtb#uE#qjk@U6MI;P@qh-`g4S34sd*lZb-`@uzGZ+P{2F zWX-zD=_nu6xRrC)V)Ex!3Y2X)K^TEVHJ$;z{N~D*^CQjHGN!lhdF!!c0-TR|e5mY> zzGfCJsdWI;FgysXh$`lKRFQ;tpWhLpv0bI3L>l_zrl1CG5P>bPOOhbdZ@SA&!9((e zk9DD*NhQ{#V(NC^s$djpUQ3g|y0?{~eP>zxPQbP_QzVCvgp3@e7Zh7p;zxv;^pUm; z7~Q_CGJx#WX>rLfOF6AfM5+@6Ae<^2bc~vB$!tb~!fPvNUhsdjQEtLxGc7tVu+`7` zQIRYS*sTGOFIs-?4SI7m=(9TUnP{sa8RpwYr38P94e6K*?Aqt-o$H!YyaYVZdtP3b z#7z^iGF1Kd^N2NJZ)N8!vs(>K zrm>(sEkT_@@XM`REw7q+Fmv{|;#mt#*J$T+ALc1l2C6%sizzc|Zo5N{AHv6ass}0$ z_h(u>=$5K6ZgESsA%(91lbRydk*wYL>F++ffuD=g0DY3^AS8DGO(I4)X&oe1ZF)ro zlWurMw27!q@zn|^)(S}K_9nO4QRTabsonl#+6~>MMNutCCdVLQ#N3|4>H7;!^e19x zZaLU>H@(N>iux@T|LxwJT5GvwcT@ba@HB$xuaN9F8|+chKcp7o`>N>iW)aj(4M=u> zrAj1%Oy!b>w>Yr{D}eQ+o=%wE7=s{=kl0#?uiFXsuBlrs?Y!M`-e!Y}KYl8HPn+76 z$U0}8X*j{omav^Bc!#M7z29Z7AnmrOD3vj3;L+1oS8KG=^0v0 zW=8=%opu1Y@>@1K`$Anp zS7uuVZZ_psAXQ?NasIS2M=SCeNdUjx*c5{ntty)1f?=OhxTRPj8gevD z*eT{*E;*8eWk+)U{oEWf#m>U5SlUzu?;ZZlzHrt#31O}divfk4dO_H3n1#i;NuH;p zX)&Hj6^do8Si~zW_%;m&x*4fzq>oy?W&Tc;x_B1rGWGbOL7wjSwckni9H@_O11s{)fBoLyu*=YxrDs8 zH@3B=Cx;%EjM}(AKUDE(1h}CNQ(YPfk-u{(SM?KRSW(uUi=vh9SG6F{^ba)Qi3qiPgB&m)KOcW zb0ta=K+kE_E`J{^a$ePA9=mx65C@S-1Tldb8~U(_IYf2LxSdubBO}ooZ0AtUI&UeB z0)BsRjRlyb0);fTzQ)+w0(wXXN?YzsG(I_bY;7%v7zfyOo6q*!;bKkh^m5SCA8Xvq znQB9UtyfJRM_OX)-npeJxkJ$eZ^DWr7sX<5%twolNE43OE{MgFI3XQ2S~2_m#Z1w$ zF}twtiryDKK_kqYta4>Yu->=`XL7>S65RnYxjoi8NvGovpUt&^Xr{h5^xVQ zM%oTM6wQ8Mz=`>VT(IaIQijW%`bE^Ae-poh>XO0e1WsW4qx4#+pg?UN06oJFBT57e zU(7+oqQ=6nMm}N7=qaa!ya@RFB9-}NO-I3Xy*!UXZHefw`JU2T1Ul)KH8^e8_)!Gs z{j@`a{<5+nWyky({hIwPy3IXyx#}TPiQTjwj($-823E^?ZI|3^UQxjq-#ur^A1f_x zF9O9lb&f@546CvN6K;*kH=|wc=prj!4Or(n^yGq}cfp=N1D_ioZR(1UFR{lX6!V9o z-i=PYK{OHrRAUdr)FOJ=sZp$4z5+v4j{6@AgUoeKbYqRx=4_xy*pPpm;>jG1)cx+? 
zGvn7N-H0dm=RB@NV)>WK^f};ALXh&L#?*I=(H`TVNBV6QW~KmtAiX9Wp+j{zsdN8As<;8cPvUU4c#wNFp(wm)kR1&p3XO`^Ir#2o z#d)U1)x3A)t|y~a0h=pE+x_3y@IN_;zoe|}9V>s&5(owT)xcQr!{pTHh5oX;&isLb z!E%RsV)FCjRl>G>6>Bn)|74pUcz$t9ZXXWG z3NGPARkYK(9*csTsG3`gv*QP{DYnqiouSMD(VO4nb#se_zPJCp3$_xq`cy?ws?AHW z>X4FG+tok+sNs5eN}trnGv+4l27n5w*+p_!v3X!QGkT^~6+qRoB~@GWpMVP%)L(;2=cNstDt5yiem&`ki5SH90c0#HVpY-FD4^98s0xxl%X z0ziO{HQF!Mx*w{W9-8>0qemE>w>9W+9EZHTqF49?`pdyeDvgH0O6@-5wlFwODV8)~ z_xJjcb~6VI8$xQczaS+MQSK8*s23y2JqVY6+wVq0FQ0;Al7aIybn?C0{1gYc2pB=A zeB(&pBZ)B;Vzo_Jh{Cd7XYo{_eDy0gRy0ckKu>PtrPxc2F}i?+>w?IPgNG&DUmkEg z`O?hl4|uzFDdg{eb;q`f5DpI3PTpPJnXagHGRioSi!Hu4RC6%k#5Jg1wDkkoA+vBx z#cY~@QNd1BzAGb$o~tjFN!%>mb)+9e)>x*HY9!mS#GLHQ?BS55^-Wy ztupQ~M8~uJJ#L?G{plS6J`DR*HhxoOc*g9Fl{4(=WXm~9oQdoFI)Dr|M2N=}w!-2Z zYMAD=HPlbGJoeCo41q6jiunQJINAtbL0Rw+X z3ZGbT*OkHK$*G*>cHh64<+gs@H+z11q_vR1OR4F8(j!kkHJ?rW_IaBV+80shwkK~& zTCg0G$4;iC`cBQZ&iA+7YKv=sQ!^8KYMz5&H*PUQ8lHS2vFy^tV2P@WiIX|*?r!PW zA!oT7CwbO;`NZTPDsMKOr5sgUT@O0~{_4}n)qb}l&jm`D=4|$d0rH7RP6ds-UYC7L zZqk(Q+y^Gvu`h_2=W`7|M&@oEEwGf&gkiHEY*02^CWGA)f0|SUbm^T7R&Mp^y>hQX zuwJndV$vt{{1vp=JI&}&@}vI|mb z5N|`gQ&kQMF|Kod{j-4_7ho1CP7Du&#lBaDbzTpsVf`)=sUtc4MnH#u4%ieAyXyxjOyuaPo4IPJ6J7_Sd^R@;fYYY^=+LW(@R^6?uzAB~4 z+dIjkjm4a18(w};Vb0ZH8-6JKp~*qFc>Ijct^}a})r$@-aRi4=PGw?{69#oo0TOS8 z9wOMJ7YjsXwxtx%Msed%8&F1fF8I?AWAXwg8%A0BlxZcJx!Q6MaXGz!b%jAii)cq| zJX@*DrS>zoe403tP9tN+eZD{n6Sqi$3-^|DkQtarS!CPm_!(%?Ky49D^d$=>_GxDu zDi5Ur{_d^_kHxTSnv2T?h8RtmfemKZpj61tMcE6cXw{tQ)q|!YIxR}QupRAJTa%Aj z&?0X=%7-fVc?>z-kG}7RjwwTp37=_YqlZBn0*Dn207bSM=r1sd5i{c{R{RNEyxQQl z@}EOiA_;`N^>7H#--+Q&>N<<*}T z>>gtl**Gg12T5nWKpyIp7E-Ue^Y`}=>^&4ls%sPq4hBoLdDb1!jG}8G%pr)G^|Uw) z+09u-w{d!y;z9rEwnYThI&}pu$mS+(U`Y10b+B)%+n%H%T4%yMcn~6%Bz9TpJ9TXC zu0|&YA5ON7B%zwTPqqzz%*mUsigAFmLu2cm*4k)^Be9Zz<;R5+!B7JDJyHL^DXJG0 zNP4jk6%BX|pHzCM=HVEWlA@72QIYWy&opezLXQoNqz5@IXH2)8i5p!ZKTmRDA4;`+ z9Z_Oo7zvMoShx|iszIiY)@(8z?B#iFh@L3wL)p_+@LnD2#s zFVO&WJ>mRu3&L3)c3baJ&Pa(?IL6t$=Qn5HeTvkLoYAEHM_*65;qmoa$N}NIooOt8 
z?$fW07eoA~o;6ZZo^ynIshX?Jp7XJZ--!^vT)S-BaN8D5Qamuh!l;5^C90G^ z`pswiqeiQ}vBJ%#+u+n_80?wgFc_)NmBpuqGLA=%Udy*4Z)MgYh_Hj#)_~W^ zFP8KPG*rallJhx(bp5pUGl5az1|F!dB)#}Bopd?WSo&1>jQY7JLDjy#B(Zn(1{Ww)nTG8KgSpC2-F~|?84eJS6w|{3N5=9>+~Hb@4Uo_? z*PoG?e8Dn*a}Fgt85zpTI64vfxM5LXR30i{Hl3W4$N;8$GR}*F*2%ki-iP0}bYkj+ zXVIcnGkX_Si)qMmu9?&;Bx!hbV)@g0;%~)~b2#b22w423%{ZNrI`?7xhBZc=lEpp@ z5d|NhVZndja+!Yz#G0`-@wFDsa`ir1x}(pDd5LHj#DY1347Xm5s&rFR{E%xmvkuH4 zMjO%14R?v1R^T_7%`_fz^3EA->I?S5fa0l0?4bvL$eOpBR);|qV0Y;DSv|lpq-5x! zltVRWY!iw&`EAnjKTPZ@mUWV-p4?Oihr;7(BfC>SE^EvSvbu@xzmQoXTlx%(eQ)~S zMhkY6B93DX-L^_XN=$yCM99+{(JvzTbKNy4C!A)5aitCyN*l%09An?!gj!t1s1_-A zsYNKb0Y(@8D@=kwW-YZDoN(NPsrTm}LFJ|{m$WH*`@>vp>jK7kbgl@1%xGD!S!BBZ z7$(*DQR-wecel^EiH`_ND5!$*aMkJ}fP_fl_H{Jc8Zk4&sH}Qy5g9l0TUJg?!TRo* zc9i$q3i%=dJG|l5zv6!Ebcyx8q}tv~OiOHw5!YM_JPtWgXnOxtrL3a-xk=XyFV7pPDS|=zT5ZId^!w~tUx&`;t&oi zk16L#h9o-FQdWic)c}24ZOqF6dC4$7zb!pYsq?Oj#YNbh?u;;ZIAD?28xA*K(3#nxaiA z%kdOU!>c(K*(((UG+w>Z1If(|)Dq^H(H~U8w_xgY^?Ig~Y@~M0h7R2w6yL42iFUys zBCj_5a!k*FlH-%awbiff;%lJ(otM-asIPUD!F+WQl0Sc0zP9x(0VLnk!OFsV!0~I= zQARZ{kdP76mm-)UuX7hP-rX!h~M zum7?>p(}FgHWa3U=#1;OA85+PXa0(LmCGbhqJK``d|ML&4i-B=V#R;RLJBPkCdA>J zdA_?a7h@*gRUSu4sPMdK^E%5}O4zLLU_)uWdJxQM+ZITWxn-qHSXY`KM5jy}X5`#< zwQ=1F>>xzBPtHF7Aw&;2<{feX9x7BHj7^1s=;m$ykFZdLuOv3QQfXAeblb2GEZWilJ7L`J2waUiMQYN){(04-OLyXKGZOUr$Bc zoh_VUv4zg+>-Jg}_iOkHuV}b-pFdWxGFSJl)iK}1T!PoxCHL2<`xUxnxxP8+gu?4SiQ=ksU1l-KJj2(X5t z>y23|8D~ll{pa)RjnbCq#Q+xmI<#D+JH^{oJMT6s7w{sL0nmJ59EWfj^S$+A&#<}3 z&{3#S0$+abO+IC=4M8#TpmIA6AD!Hld-u_omaM$=Z6iQfrok`VAXQSq>dNmX4-L|) z)S>Bl;fsRL`SC@-vdx#-Hx9}F+2S%7Wi^vM2CYHJSMd`PSH<5h780Ay+$Y#1nC9~OH@m5umu^{S5V=s^mI_>D0NTYmfejIT}zUwVd>eS}8 zcs7;B{;kqTqsqbq50=q#7 zJXA7s30d{PvKRroXn$t_XjU*Je?T=1`S2({&SoqhV-l8@1RQh`9-P=Q_kVDA*P2RJ<79-=!Is!z-p>{1EB+X^3#Q!d5JFidE-t z8?yGC?m2HOP?GB1&?^~v!WAIN96XL)Df@fv2Ok;*a@gzyj zGL-Ke)n433?<24K&*|lcZn`bk1KtymFYK0W$nfkU6DnKJEd}AjlgsZ*3Z5SRj=YCx zH+=L(c8F2eWniM=o1?MWc2?0w!?PIKJd-2gK~;q1AM 
zPgtWjp9Pvq5Byogu*%L<&PaDKSo0%~Zfh6>-8W`%rjJe6RX%aNW&>ruvOkI?{ZL8oxW(zbsCS$btjX5lxF2ZD~D($3?7 z7aUn+m$~`sJtu;j+dHGDpor)Cp&d&Xo+ar6U?~jhk5+j)oF<#1hS0wK*2`;oP?nD3 zvypkI|0~}`QX*Me8Uk?@K^31@a=|Gd!J1Iot?IKRww|kmNf{tFeSS0D>@igw{&l}t zVIeMp$K~cmm%P(eGx#6!wMy@B&qK8~aQdd5)ASap!C(QexHM%bOJZFT$n;K~hMY)5 z&Rby6wqN3nUb`2Zv#Q7 zi-{!1DafDdnL!EqT#gRpha;#EM4oKo#AaH&AMZE1`wX3y%Pk@Nb!AVTLnyyLx-fbx z61ch?U1>@vgr)e=a>_;ShPf=>%3@mGb-6q=Y%=bW9xp);kA37MwQ}|1W>#eAY&;TQ z{DTc!6~JQF59_TWlcwV^R$AfW(OCv)k>pyaLukFwm(#fO?-wio*fS?Oh+>fEuX(1I zY~$~&S@0xk-*uC^bs}jLWqeApHp5FfIRE}?ayg@(S3(HKCZCe{uDKmV#~e7pKR+O^ zUZATE2fu_fqROWta62{p8zJeH#>(-&W3y9t!;aqvgxsGu!VFF9| zRj$*XA!==t`>HkcW4=pnf@(*$|nY*~meVYqcONa?o!o~{M^c*j9K30+WJb; zZe8iE<$GIznhYF_(JgU#iRrcJm;hds?0NDd-(k5PVQ(xo2Bp;q`hiDCH)(cb;Qzm- zozetkxeQMiQosQ%@;h;Y>0M~_&tb$g6g?D}%hAX&srBid2qpn)1M5*e$2g%i;~M=i zK=@4k7E-fQXHt8wf3|8A0-n{QEzoM%;h$rV`lx~F<~0F}#}DkQ{Lo+Pwju5y(1{$u zemYp3ReG^cC#BZz`AuHEL>c+-Eoz$wB9KA%{f*GqU$wh?xkqdp9xyZ*Gn`0>kFKI5 zdk`}GlW+d^(~G34Ant!PxSq1^F{M^N@cHj>v{n9s7MMqG|Eb;oxAtZ(l>Qak(piKI z2a|i}Phlb$1Ol~W3Ls^rX57zz&ZxxHKV=(igG_mV0%z5cz=p1=2sMLsgZ#}beg6I| zIY>f~3tiRGHR{XhE7hVdLg;rqJUerONln2o#ow-e%U&R5QK4r~!d$U&7^N!OEBbT! 
zbIHRaZRH5@xTOQJ(R%SeL!U{rasQT-yir@bm%A)+@7;;|cB-;&A`gT}82b?^Zs;-c zL(*1@&ke^@V3B!LDH*3>vrfD9t&_*M2B*fugUqCY zzq1=R>%Kq6-0(l3cLWOD_Be-cD1YYjkR(s5sNWx>e3!j}bN0%Yh{V@&S}2WFn&1w< zSvhaxXW$+GgS}YEb-a`}|9x>`pmN{huyb zLeKEC0!)ZAqirN+6kj$jJUc0A6tJJT>5QH_!kpQ21Yhu3cQXnxqnjz2rG8_YFrZe* zDm#gEv4Tvz7A#HqRfcj@XXF>?bYrjlu;YJJyrJf^EzB!Mu4x#D$^%shZPXDb*7=eB zrcV0eHrN5pc|(MsGwzuQ4AB0et5iWRc$^UyYY@AiUs-NOnerv!?Z4~yf93k;RjaeZ zKo9P-UwRPqK^H$|Yt!zTlRcvSZ@-|EGW;buL6w5KfU&$BN8JWOsC ztNd8{<_3DyL>RY@cAEbkAf_Ek(X0`Z7HtyfYY~3>3Y~o-V96heNvPdUsZq zBxN>HYVF{US%bvy+cMatzQI|4($djB*gxQb8$H@N22eSpMyvFj&+pH=V-SF-8MB()4#*uIb zDro59>+N;p$@UyFCw6WZ{nH&;GxEjt`+fdBo}jFQt0v$o!aVEZa=gT;_U?0AOWQ*P z3z6I27y-FRpaDtX)Fyy@H5m&r{(I2QBqrjhapqjm3-VSm0a&aA;F+jZ>An9KP(>PU z2Jj*}nRpP7BcS79ibXk=Ea&oTTKSv)OvBhBNbC%{u?bbl&eOV!h0y8^_AWmfN>&@y z6g=&INpG_S6lONe`xkn0vXbk5uSXJJ%4W=BuzjQmy24k8ZXPIeZksb%$*-Sil7M9- zj=c+-Tgh2DzimmxU*P)l!B*QbcUVOz#%zAzr%tzJI8cA@`8u%D3~O+ja{iXxbeGRX z;LqBZE!jW&+^gDW={aEzh#lHSq3ex*IP1eK#d>FuXRIvA#MHA1`cDQ?RB%B-A5(9D zkwNbJD#y@TFxh1@LrX5?`!2ILc-I18XVL|{*nDqK;vzC!jYTInAdbT%vbmtJ_tasuGqk8AbRc`7dg$v*(} zpP`HQoWW|l$FgVhI#I;b;`gW*e^?AZT(>50?SJI)cG9k18K!yjGcD6Veju6m&17JL zG&;up2RJtT6>huj*`xh>%+1}+kYCim+tVzhL3vdh)q}ax1P;>cU*9nSL8~7i5T#i7 zdi#F;1JhJharByVUI@KR8)Nzq&~US7EG71$FwyG`eb(fTa{Xd<$yZ4rdr zPYFgR-=U;xF+3cvi2d;=y=T%?)n+=>+oPufnhk!fOzC*;iW$pHdjmpwt*@@`Da$5{ z@ejgxYhB|Y&beT4KP>nzsi4{t-whbn_R%=Z1P?~$9k(swO=Ki~eNFgeVo56k&^8Kr zZLz&GpoR{$)Z*@)x3?J9vLI6Xq(eU}CIXqy=dahqyPkEth^e&OqCnv<&U9KXezttB z7OXRfx5-nS!aX0aNUy8EO^LzP&D_v&Rkka6?}gm{tt)U{Wb|6Y?bG`+DL;R|olZ%h z%&!vEjKG)2akWM5VE-NS^w%gt{EUNWe!)02!0@0Oil>O75b%tY)CR@}7sUMiznCAD z{X!)!53l#Ri7BV)>L`$bl|o@}EQXDj=nU#3(d$L+{hybij{>s1IS;}K~40+Stn`DJ9lTpu8+MQNw#D8z`Cb* z)uM4D)yyfEorBU=G1WD4$<#LX7f~eBN|l1VY3`7BsdA|38B#;H9cRgHt3EqA>Ev(DM#)|lFm9as9GW;uOy+p5IV4fY-kL0oupXsOU9mp^B6i z0RG6j(@*k7nNBNAJmxdgRRc=$t)h)b0(`yEfBWYGVWS%uvbk}M8H;SXZN8zwT}E3DpyqQ0)R zUu3Jx*{o;M+Pb@c;ba2WVOV}f 
z2xjvJDCWUwc8})3+bq^H4;Xw`G=n4g;@brQmVjl@@&e{Ye~DCSXZU<}m)c`{AsP{h=L$IO%mL_jPX(tGO!sgCEkh_E42m z=0#2|Zq~o?d=C|;?aTWpWvi`hft=Pli7Fcn24Q_QiACQTINW>Zw91R?EPl&A6y0ny z1JpS15C?)LFahG{W}t3 zuU}(hOMSKaHt_9&w)^?Xufgs+Mp9DJc3(PQaM3Pss1h7Ka-zTvljR!H*Mdi15Iaj) z$z2f#T-(q`pCB*cJL9H|e3X~rF*-cpeKYn_W)eDXse4At`yzA!EUYjF)ZGo0QK8%# zdwf$62wP-fxakQQ?upHiC`;%U8~ZH(Y6Ux?6k^7H8$e>L^aq1N%d020pG)%M>q%Or zRw`f=Q2T{(HtW~9Ps1uc zApsw`59MiTc;}V05(|GO_1DI<>_;Ps&ch9$1Lj%c*J1kw*Uu+9LhIwVqh7Wv8@k)O z&f5e~afHj=sP{+Un1S^+OJH{5yZ=Pe!$>;PEw%CR!i^Gx-`UPewgen~>MsH5X?WIWpudFKcu=5H;> zKw&UgRIoQZ<^0?wza=?4u}C$aiq{3ms9#A>{~uFl!4PG%Mr#9*lp0D(x*O?`5(bcz z?gphLrF7__yE{Zd8bq3*yF(Eqr5gdsyT^0Rz4s@4d+%?@`>yq@sHR|`jE-dFZf!@Z zQus)}uaD))6c~QbB&xtqf!i|HG#t;S-x5#tIPo6*_Wa!vi6hVt2vznoeJakHibi9%tu zna*X69yme)5+dLfK~Be~Kr%coqeh^04gm*Mfq6F^G56ejye%LgZ?-0c=8vbo^o5|i zRy+v-;r6lSM-`$MnEIAIkz|3>r8*INU!8t&M&f5qeY`DGu_+@0TfE&$p6l?C(Wya! zD*40ZJGInR!r*7TeIc#3>~ou+X{mD%yN^zrEP5VR%cocLLAGXI)GKI1;}N@7zEmTS z{L?2=!e879Ws;(6izaBc@F6ftJvP*+q{pQ>6J}Y5=ArHfRL+M_A&ofpbEf~+)Hgi^ zg^>l}Pyj?T8|>!Ledo%;xSRE!cu;?7FV9K-;F|D32}Ip2ejvVyh}|1kgMF%5Pxp6pmshJ+i87 zk?<4K_BX(zgzLsOWxH~{s~LUK?iBGWz4pDrdiJkebr{0$G@3GJ%y%OjapIRPUv<eUMCi~RTdl;Fbd#}wGD57B8yD>Rp!B&LbQoU?-aWbPpi+L+aP-z`%;Qu zd-#MH!#?~NiWI>|MBqA62cWlW(D58_z1X=R{~FFf+-V@K!?peU^g;f5Aj*0KSL<_- zPJ?7`F0V}2z0t8Er9x)ZgzwpgnHqWnnL*I460P(D1VEj;TWsH*Cg%r-TuS0V*hfa* zb}M0HCb_V#HgK8&XErn6jv5XQ9j^!4?SG|au6GBi$J-MtBWJ@d zG6%X3tocgVt2c`UP_-52%*5X=TN84;h+A3Vlz+uO z7jh=XN^u5_iXt664hDLbNO?0cZ$O)eOqXJ}UT(8hDLc_@RsR_l{I%4HD+#%Ki^$ea zOyje`bVckn;D{I5Mw&9C{PB};%8uBm{de#T=Y9*5u*MFvQAQ(!kda97#XR|e;BMt? zmaKjrkKg)UzwWhJH=GDj7lj+2Oj~SKpcPzP9th_hUZN=g=VE>=xYEYsN=cuJa3JPU z-T~&^(^q>8SDNTcvozpc?SLC8S+)2Vq)K1iBEX0BD?p!F3!`|7#Q*|%2rj` z!yMkfMNPl2iX|-502p$a-`=8qy=#tQx|67Cq123oSKvX4tzQx+DtA4KozjHT^>5VS z{~?)?@5)1`xVlMIZE};txH2xn?GZf)BQ5FENtCFJcddxNRPq_4V+x|ff@jJ)pQFH< zmX<_41(A5T+&@xC0&q_jvp^N8cBU~+-)Y<3cOCkGfKyR{%!T*4?gQ>Po-2*D2Pta! 
zIzF;A+fAN2SCuXDI!5!?FIV#KkrPPLF>7r8?(Wh?3Vj{*-4<94>e=DM9B@n`R&>9V z!oW+8j`f4qtH+cgZg|~uTV!=sgT%|1-Cjg0Gn47kF|;AxV;0-M^DiA*%;}&CdL=+Q z`pgWz?0<6^o{FtcMw-(>{t4)A_+#zj3NIr1t_musOpvo~T>D`BUDrS+xQP*x;ilr4 z^~W8t)XD*y>< zdJOQu>hE6L`HG!cGz%O)`%i0{TA6@%?K#tO@qN{EL(H}3x;Fllkw1kA<~bb+0q$}G z*^cUmZs$&U1#Di~ek&_Vg%&qqRm7O)hew=qVgg;HGb4rJE~R;_oCIUKs69n@kF%A2eQ|UOpjN1&6Ze%E=CrjRC@#9-{q}=fzgTF zGk>f>I@RA+qv;UEFrD>HY@uRjJzIAe3U+q2#YTPSQY$ij{R)do`uT|Ai7hjtQ8X*IAmdTg$} z|J`;6Jv%-=J}o(^#W>mRa~+=Tk?SmA3I|eNHL{Pwp@K4m_&2k_@mio=_gd?|b^=qw z&?R%^^dX6vm2abG9L@yx3t9x^jRt83ug{tN>E1jEpz5wzofN&1>sdJzY&DF>hjE(w zYV+q+-S<)?JsfHGL~G~61B!wXY{dXAPPT_G#b;@;T=dlXvbBdQv0|&r1Bu?&!sEpm zNy!k$P6r$~HfJK{WLOrR>|QW?o?a}kkd+&1*5JFqaIB}_`zg|)?G<6TIWmUqtUnju z2lxT=Dw2>BeDgy?%z`-R86c51WPt` z8wS~3k#WVN+n$@cjU%GNALdQ|f-58( z14m!5p+t@5?2Q@w@IP2LOoJu4_y_ThRH*g*MX?u-TXBI&pEF2e#-~X8xgD?d!*aR% zi6$|X{1TIi$dovPwech*VS~Iufl`O3`T26eRV-@{fr^{>o#Yo5gQVLe*`90wIp_jN z?J`ZfkJCZek;U1;SS0444?xQ(gT=bMO@8K1^J%Hm>JV3x{(O5{sTqZZ-qtcrCInk5 zh0{pZcYrKw$(YIpnfK9NiLX!!1*RYMN>eE_MLNDeiQHD)7>_j36AICl1}ZRZd1&;O zt9kJ@cG7Tfw9h;DPX>J6tp%TVnR@kPC=7-t=PD>+0?iOwa2DtA48;;Us}|RppL#4| zkGw3)S~LWWNy?mnd+p(U&K-x<1T{hdgkA*ZBV7rlNAwI16}7NJ2&vJ%_0GY9lkE}n z=dFJ?i=VO-cX1xcfo%peh9wdW>j`>w+gcYvnP`&=ecF;eUq8mf&>L-jPUEkddX;-} zSkhR`^hMENuDkOO$>Wjo=4UO3uW1U)1ZeTt5_{#UA90-0b@Y$k<7 z6zR&p&1qp^c}Kdy>`<$Uveh1B8ajga^RASIph~!DSA+;dS4AQ=K+&8OgaJgW}+_{Y`c`Ev0@y!&AP_5Pu$Yr!z9DAK`ilt%y6E`kK@;J zN*ERng@@g-UOaxh;g3o;Co_wZ05uM{h}KX1a|$k&R+t59N#T!K;Q4{@eBC>9TZsZ; z52HDTpS*_YK$@Ctq}GUtHWCmb~ilUv9K| zwF&Rg{hIzvE`29}uef)0s=rF?BWm|&)=}BU?$2$H5JrR))d8*^=I!%Kb>CL~aij?# zjRtY(lQ(cj-FNei4_SPeZ!|{PFU);0B433yh|hi$Pw`x}Dar}(X`fg4HJSV>_&`m3 zq~uBps%EZqQgO|Q0jKD-6(*|}Ly?U70y8u+sbl8m^C9oDNAZLK8e$g&y3X-i(|`n~ zuW^d5=-cE41;HRGirM;j5)9~u-3;9H?=)|PIp}FIAIh7W zx`9gJBs>EUXMnOV$OTvZA<83c@!$+@?h_|mdL}y{*S7hUiC8b{{QOwg-<&L&${#-sYlHB+p%O}t6`smOCHE2v55IF?Byq|BU}^4PI_I39)z{9a(_O3p z=1tEu8Y&Tk)HD57%CNXzJffvM4{jpK?}NfL?1eg^@w{)rPkrZe$T)q7T3Ssds-AkH&Hn 
zFRigzFIU>svUHS1;+d-=%hCKKozf0ADYnD)U9~#P_Dbq@geRo!Ncb~Gh@tj3uYQ-^ z8lIe$g4+W+ox@UvS#{x<(qzKh3d4l|h^;9?T^2{X-k<~nv$YE&b_i3$Zh=TeGE!VD zbr8v7)AzvLhm3+Na9yuKU8PG2mzLm3Plhx3c6FrW@|q~2!uUJH$(WEC@=BCi2amgB z-$(3fD?d|~%bX#&EaKsQffG++vmMm+_0Tx7(W}3C{2iz9SrEX+Xg2I}nDxps1Ufzo zkhsx&Kj>1Q$&5{L|2|JR8rUhmFmw+~9eWT7JtlsKDUfu(TrgL6Qa`>~-n9?|N@sW< zL=G`xMzX%wAXLm-+x6AsCe>`g|yG*RR-h#QBdc##F_J?+{~ zfcd;;BG)klR?8>+_XbjWqn#w>_GY|~6#Li|COZS&g?9g_wxSsq1o}^8#iGa!wC-GW z?cM>J+VkSYBONpUonVd(`KTB&K{5M7hSTHqgjL%6 z?oAo!;wGG_R=>N)91vmQYM{f;$lABc_dCeZD7@R1w!0LiJ0@sfcWQp?rt6-kzQ|^4 z_mBaf^$=c{9Ahi(-`s4)2_BDK-71_fwDf$+5V=`ys9I`yYQWo-Y>JM!jl^E`f&K(0 zBoMXMaY6qg{fP0?m}dCLq7Up!IE!LXcw1N#Y;6w;(6S@xm4UQEqH=w0tCqqv_-47E zz&;QDCVh=mk<_k0#rxraL!vN<0VWFEbtuCoQG%D=Bc$k(Kj<{N7U4o;cFn#YZEdQK z+p5!HFAC_!#sjZlIDCtqDdx_b-TF#F1A!Z|n&6VNNi|tvI+eeIvaA`%mqO2yf7Vfg ztL|g$5IN8H*r$9S3Xzz2(GAQfYiq%kBx>(m?bLM>J~gD-?JfNY%nyOW-fmSh;W{Kk z^lJp2dOkg?;qijvu?FiP*lxsiLj!-oTWeifu%fW49WgMvi5pOfZvLe zW}P}TW-^KGV@PTvt;5N|GY*?~HuwTA8b;qpour+=_n#tE@FBvv&MhzUl7kVc@QbXx zgD{wx+bpYK9mVV6vfyYoQS;v&X>$P_XQz=ActUTIoL)&-VxAP_ddn}>tng{#4jCo~ zoCLJo|MAqA`c!0ls5J1_;L75*>aY3l!~v*z%*(iPY92$%rDAuRaG1i&P%%yYH2zP);j`MMLM6OPErc~jwy@7gN5A(FAEakw@ z!`tHmDjpBOqQu}!;*f0l`NV>7_N?7cOLl^u84QmV7@j)7=?F-1<5=ZiJ6 zIC-8$^#59=s>m{<3#9>>Q;J` z!*9oWiqf<>H5o*tjc>=kSloR3Lpzz`R-{x0UNhS)lgT`sEsFtPFDXROt;_&b|qpMkr`V z4Tf%>EBrXkj)^YnU%YXS-&r&{uiWgS^%Yp=TiyFWOl)Vhd`6Idt3yC{MqZ_NP5Akr z^vnV!_`|`(-4HrR_B&#tftr=|gXXu~k7xR!x^C3~m~;X1j|!@vjHS!Et|$}SOUP}| z0YP5Es};cxhfjsK8e7?ZQ)Yw$%4_&{OIAb~>CkC%ZBU;%Y~*5pK$lv3I@{fgIGx>r zRzWdx=S1i5Z`|7VuxwqXoG%{pLhtWa(X(h-Ye_jdAbvGM~2|xUc7-dHbV^|9ca4G{;w}1w>}S+p=U4y1fq@NgvL{a_D6VSj{cDk{PRz z!XTv5v!rxT<%~A1rk*0j|7zw_?TKekHa8ow=Kf{I;bjb!INbZ>cb?zX8Db%# zPARn|axG|@FU@sM4+J;J(d5`}Nyj#4$c?bq@I?>H=NvtC`vwY<5^XP&pLI08-ErPH zqKvVOdUtsK5UKu3I9KgSSvJ8W;f-MySqw?P_=F~_D%Hs7u0M(KW~2;%EzZ%TICTX| zlV;)jI00QvefmuXSQyj+bj%{e4?H`wrMawn*C|V$eI>3wAC2Z07f-bsM)MYr`$z=i zeMQ4urer?YRfL5T_ZFTT`_nl9W145uT`4pZXl!acRSI{~M=F)R6l|oa0h!@HvK#M< 
zz=10eA3a`Uqw2Q*Q*R1~1{$aL;6n7_n%+<_efO`RXNC$I<%ZvAw^O;*vQ0xCq75Wu z3a$QNVCcYu=uBca=|x^{CIG-`P)D*G`43=$FR@l}U5Ug>Q8!C+h)+av8#=ci-x^TM z;l5I9ey)N}_S48sTK$NEgvx+f0h!08L7E~HkHEH3?kt-XSVaNfY7CciDO7tqVbh*` zzeK1d|0&D6US}pTIW_YJuTM<==Z7OdK-0A*T)rKLojPxCSb<;K$sJ{ex%n zcge~x5vef6ec}7`^=JB>IAu?69y4ls76!)T=jZMVE=!jmWK(8;{R%n=S<6Ydmx+z1 zXv}V!zRE#TAI9Gc)a|_mOfj-&g%|##ZU<)9kLPm?IC5WcuSW_esfk<7lH{qkFW{U_ z+vA?C{qD@r^|Yo0ymGi$=gB5jXIAX&DzGcmBsjFxYU!R>OnEQ!hT*;;IA>iaF1FCCl#I88Jdm^DT`I6PQifmj(u zUJY;~To_+_&AH|i$xB>bb4VO_3|=zRvm_PkNT0QSM}>})D=W{1))xATo*3ZsCE9Mg zU3`ko9P^8JOawqk*$?80=^B_sooxBT#{#-8uBan7E{_GN7Q@cZmDw;8u*QaC_f#t! z8c~ETM$%ka>)s8NbJ1pFU>X%DXjoIG1tV6mcK*Mwi)&9P>?%1ie?wiJ-Cxeu`THHv} zS8i?SFxZEr@^bfeTo??4#ce(&=$Jgh!y3Ck3`U2QfAje)wZVzjnkqZIn~50ft8!g% za96j%*2lD0?=haFW~V)~JpBPu8?Md;VJjWFARJN-vxZ(f!)W*>nFB{6M;H5zYI)a> z0=A>SOXfhrsIzcUIii$;w5^Y1a+scA@QRG*H(09s*nwTe1|%=L1EuL_le z9hp3VHMl)l+LFxvfoxPkLsvAU`|96EZXh;tKki=Aicv(EzlxK1cT&P!4E?J5RllB} zS)iUf`5-HJUDPH|_YUF6#5@70=C$l9gAUVAY1Esn$)dhZA?TTlLEukhZlzjY8ExC! 
z4HYw%I5{$?EXX?qjZIWx9-^a7pyci1=nasJu8cB+6*dbjJuAdk*-8_^+7Hq0h-8A% zN}(o8d`CtCtq>`Po5b;TxDDq|7c6XhRA*z>8_;Uz|BNf5J24?-5v@TBWY6hu`U?6 z3BbLK-Z76MnBHn-F`QZa6Mr_Ap9+{=sqT+YiC#VvTUs%Ll!=&lIvOeeKw>-428v`#~&LWY#c5go(DY(Bx+Dn;S8@%7|cI{^`tgB zv7VVkh?4?b?Vo`efCYspDs$2#iap!?yFCf7-n^i_u0Iw8H?Hz=hQsWexLmKkc!Z~^ zY+lM8h8_!G~4D_!8B^5?j~Fz?tdK20nM=H_mL|1&AfMs0Xv0Ddh41 z9F{~<$9y&;U2|0ya>Nw_No*EBYVGD~?Fg+$vpHS|b880!Z<1yi@TAPHEf+KixjRaF zP)Bto6KW%Sco}UNMiAKSBdmBtYNDI?BnGOolQR!KVVaLk57IUmQN$6X@mldZY-pxC zm9WvBtUlAW!(lD586_r8RI9#(>#my95j10NNe>?ug;F>b!Q zy(%$oX{;=KRjU26t0aNLbD0u0uBubxa&FjzD;YYb~c&0^76Y475( z;}auWcxeF$gBF#ohC-cZ>LF1hyP2L_KZY6C4!%tmR3kY{pV7KtZ|{IVlfe}{@oPqW zT!`tJ{>uOEbs7vH)aEQ$*?RZvo2cdU&=&{!GJ&3PO~Di8ds^`+V`8}V!m41bKjmudN=SjS_&%uH-B{U5+0$VnsvZXVTB5Lj;;PT?__0VO$QQLbU_RF zDa$`~RCNVlT|{JH0>MekyFY5>hQhvorgcj6Y8d@N#xc0S$U~eAn>-w_dy?nCjyV!u zn<>jq8T5MZ;o%D-EMETVe+{JT>3G-eGZySc5#0v1aU+KF!=HU)^HCD}7!=tW_mp`U zl@kVDA$Y3YyS0{ow~B>`MPFz1)<2@eiYTp|EzivQCV*|+CaJIC)za+I;^X?@UtGrD zKiu92mMMk;3pd6Sp~byO%g2`SLEd^yMZVPVP~S9t@F*UHMND8cuk~y$R`*f~$gzWa zf(RHCpMqV1S+f}X-Jk8?&QRP4*iNKequW;eVGzg=l5uvfoZTjV2dFIQ*+Hv-w1=>i z#lnaH_(%z(1`iIsYFRNzF`adt`#(SnI5sm0LE09+_r~my!Yl%|)&@L+<*QD&uc06` zIPWP-2A6$LbHNFcgaUqaG!jA{GlIkmB4RVkEW!nQBNz4BX<{NyV6D=htEz0k^x0D! 
zF~nVzm>V0aGrK5d2%g;z*)Vptmx}x)O7+nxhuAN_W~EH717AS}T=rpOWW!@sdQ`7U z;|MAQR4|p^tb_EZi)N(4lw836IcWFAF(?tQjbs_n*XEDhZjJ+|GdRBUV!g$^k5K}y zk3t*Btj~Tld)I_Xnpsx>eOCnG$!|O9{G5z`OaCq6f8s|12WPcppqK$dztUQdr4_lQ zLo=(51?2DG+fYD&+DEkjkwYiva0pWE@q-DXwS55AN|^yXN%Z^}rc@LLfBFz<(qEVY zqn~WdT*g1Ilo&BwmKn!$H-?k)Ch_kqR5CNIrW+5O{EFhHtOh0#IdnY^nPbUu0z_CxAF_ zDr(=u56l?6Z)Vuloo@i_-va~l4Vfb49ay)t<3xY7{w+=r-Qlhc-XvXvYMfyto||Jd z>W_kx2`?*N0-?{fYJh+Q3Ty_#hqj&u##Go0nfbgqAy*pi@q9@H8PopdDedIPhgqTz zah~Y@n|KM<-6?RA2SwLRXg*f%7>)4n>9vpm!hfgAe={FX%K7Zoz{;c?^o`k8;rrQ1 ztq)lA-Rez>r+O`3QSo?<9d*%m&a*iNPFnkk0p$bz2B2m(c<)uFF*t}?-jYd;hs()L zga$s+Hgi*uoBDJ|VeFJWabVQE+pWvMe?%;mCoxp}8e9?g_68X2A!3a(<>dUepu2W`D0X+>Eac4}Y)L3) zg=&N@<(EJVb7h$={4d1XFt1}fE zl;Ya{9x~2k>4h4Tzgs>D&uKEWGd3PA()8he(>H4S^V*^LJ8R1*ej+I$>Sv!Yob1-} z3t}fc2mu}=MxN3Nb=0Wc)T${+?1P1l@F_`uT;nLu!qSYtD4e!4ln!kF>w4I?j# z+tvgsXzwv6AZgf>f=-dcaHl2)W(`r;cJ-j{vX=nd$h?-GzKvb;N{(kr&=&Zqqw!3NL zB!};ywQ}{)77Jn2EmkmAMq3S0**h7cOK5}PK}`_rW!BPIAOeirY-lVPO!~e4=Vya6 zN@=m1d~bOUoo5q??@0IH^c;CYZWjnR50YK`(fNk9D)0#~+^x*V(_$!AnRlanH*FSL zXtu>Lqm*fU&Es#Vq=t-vuRfeukrvO^^8tC^Q9$j3anlA&-gss{%p$PLzwLmg+Ab+@X7-8PK}`kzq!EeWTVSD)^|`tNNeX!DUca;<@2x7L(BfP z@EU!17`s(viRmrgO@=5vfj@9ZLgBbh|0tXBnmn@kGzp^p&O=&5^=n?6t$7p$Ht+d9 zvV*@tklru$u#)N-W%NB$&Wo;?Bh<8E;Z(pdGk-^|zwRIaFziJ2~Os&ko$s_@!jOB+j^|3sEJ6G}2BaMa&Qo z#3y`JT{+E~e1Nnkfc?b5H=Lds6mp4CEP&h}2GoPGWrycLhTHR$Ig&5We>Ae0o?g^r zw|#H^E2^%h)l`W(vT!tBZKc$Kfn0t-(hU5?5E2Q`eM2)w+Q5~UTO9_$Cf9PnI+{tU z!YAfKLFKPAy$v=ojBx1j+P_stZlOUcbijm2(~pOY^zDADe>C}6MFzMp)qJI0kjGX| z5WtUrebf?lVC@&07HW-&I-XP}S=_)_z261$O}@A@O~I@2Q|L@S%v9&AvnTvfU{p~2 zZf8}swZb^R&EH>m%2 z9MV?^fKt^<*5YJgj7Dp{F8seXP{i6!P>kZ-|p9smc~R_5LhaiZqbh)-WJ`sQ^& zOZ!+Z3|rd?+q?5~th(?dtNksSC}weiE`$XH)XO#{?Wx(i(KO?o!I-JGh?L+tfoYfl z#uqhVUkj8n#8(-IK%FLzMR&-_~{D1$XihMGQ_e|vXi|8}5-6qefo60@g z=p7Ci;*EfXdNz3JmeFEhc37eHBp3oL?L8!UeNoMA*8HW+GpW3*S;1evfk0*nf7Pv; zqi-}F^d_VFKg0I#M_mW9ZP*9*y&pso#2f?x2_U=Sze9kDTrH|#yOjO#qS0a8kL|-Z zFT3D}oX`Q_pRL@RsF$@+&odu)dLzk`y5Cqmxc&~Kxa%c|){45s+QD)Kf<_|*4YP-# 
zTlJsOkmKd`@qo7h3t$}1ZyT8uFvH(2j|azzFasg$wtzh?Bg*}stEnJhapJ*n*_+4> z3u;8GtPkaxVfN$fMR>go+j2s(-l1Ve;-y1qm<`OlCtsEX{9gfYkK$3}7{8hSHfr?t zyZJMVcp0%4l=K^rivaDvo@SL+olMk&J(<&p)zKJNSatv;x?p`$+~u3SfC?MWsUb=0jwmk3C# zl4kC8`2zCaeg^%LnAe$VQe;MvgPrGB7gi=CEsS!&R|qOeajqcCxYp#macm;6y%p5i z5%Q?poAahT%Pu9BrB3%-n$jYFuYXQkDZuP@;l8SgVC7-VRvXx18C68!wn{{8%jvu!TB z5C*B`Ke2@r627D@KIbp$UQgqZk}|uw$3q-q#U3t`eXItqgSdAn zU`*^mbM#b-#kjeFxyXqCVzE++B9|VflWF_`Ni4=@J^h|WV)s8mE)(})IGyl$udwo* z`2FD&{$4NwVT!}GvWG1#KoMuTpY@%IosE|?&CY0Rp9bSF1}i}J;)63qtBWqPU-3mq zm)d)RCsJ`s;-A1+{K*$(V0tLTvZOG_)kr`Xs~h{D?#Clg!^RJ`^HV9|XIbs;d8VmW zyuqX}4ML9mpQ{-@f}$)_sr4*uTk%Q?*L1n<_@-3_)3CQbqZcKD;*;H?!R zBVPc{i;iOSeB2W~!F#zo6yacs|DV=;W;;7m^xAlWJgjiHsUFvW)3GDH=8f#@IS?$7 zJF}~Q;d}v@g_|t~PO!DLy4#Apm0CC{Fi(@xha2g5hQF7IC-gW2!$AK8mS0uKjgM;4 zvZ(@%Ur|^KimBnbAC5}T1VW6vk@fqN@t5Oav|F+>g&hUlK#7VTwSx>v*_@J$5CnabIaQ_R1?hU1cGsY*hu?CJ!W-lb$>|f zYG2$=0aY|JC{kc7lHXDpZ=O7u2r9(ajxO;q!&9zH2fchTlCw-9VklRC#hLHWfEK73 zVgKaio_T7i=_^Bk17^F$y((TaZmG%8;PkN5L&3^|^=Ip3e& zzVZ!-i%e&YKTA^fseC95ji^tdeCwABQfgG<4@R;ypY-^fUU~e-?D~>mRn+}G>Fn0V z%1hYFpsSi)=g^FtvmWNd1*it;l8PWl zD>MKPC0YyeOBluAVD62kaUEAc30VtW*IB>RD^#zD!n;NTsvLqi48r%x93Rr3eff7Y z6of_|Ox;(8#d8NQD$z3r{YZc|6tmE;gBU#o+iMV9h{&hiA-+BT6#(;!eP8)Q?2rF; zDIf%?#1lI1lz|%Bm{mlp&2aiNLJ9lp3<+DLEBA;ch07!wjqJt=t_4P_|8oi2@?mjK$dy zp6`!UKV5y+*-_f4cMgHh&(&`IPVoY=C4P-WYh5G>CcwAS?1F7@#?>9~?T63bzcPjo z)B&=9doI_GO?RV561;v4Vs^=KkV2GXB9QYeh$0u0S<>DQX)v~OZB@*l9r!+%OdytJX?v6~e- z)&EK5@ZyzR5!K%##d-G1>*AlGSSZ0fpfW4|)|rkNbyjXx#A)fIaVz#qE9%x0gV=Qv ztmB#LT*oBkRL!l2nR`DeVjdsV7-{`v=sDkhT7L>OC#C}*SP<}$aMl1&h5k?RR@_Is zkOI6Vv(U@IjWeo*RkajTGsdoi{a!P8FRq-R$a$E)P=Fu%{9Xdb0DWQ_3HYY&R^&IK z$c@;?0I`Y|?dPRoDJQ@x{v$@n3{VZ-S-}K)Oo-W_HA}=9kFD5&jV})k-U04Fl&4QK zTt~!OD!rcN%MsHaWU~$j@mR_Xf!}k=@q>r5?UePy1Dwg_b?g4P#v;^yqKSm+Bvi5t zy~A5o78EGix%qec1#Yt;~I8ei6W^vQq*}yD)7&sf8R|h)e=DBmZ zLQJR?0fc1zbZ?Th(V?V7O_kv+>3&5REOz{I8RZ^GoosFXra(C&OW6v6++A$tG`OA< zfC%#-G`^kX!28~@Ec6A>M&dzma5`R993f&xqV}AcoAqAcSDc>1s6h2X&wCRBU!SH4 
z%oz)NFvch96Z7v7g!zRoZ16gGN=r4qE=@`LmFj-9%0pcMq!0-4SrfT0_i9`lSy~x< znW$)(BM+IDP0KCmV)gDTSHVs2Anx+T`wA9PGfNwXTYWL(>RWTb)6dhL7{t`;FBuZs zhnJZ73cP%5t}xnyUw(K?B4t)&7oWJ!lv1z}k_{X|(AY%Mo5~!S|6(5;zToFwpAgXq zrv;c2>vP$EfCWlO7nhU8<-dj7IHY%V;S1B)@5%VwdCRZKfy|+xGR&UPmJ?b%C9L?( z26&OiOxsvH>~-FUIgg)U)jq{Y=y$fC&EcglddiYyqun#3WZ)owB&p8UZs@@ z7r8C<|6r2)p~9l(;cI6v@^#$94f)5>^n)Pps*4NrTI2C22iSfaX^4VZE_(Voh!EhT zv$RQGQX(ShWP5=rq7~zL#Q!jj4%42%ML2BGBSQniNVeo=8$M+0FttP5O7OOzdp66Ip^a2DCB@N zdSw+(XQA*yN+bW$Lh@lRL7G@F4^ykPa9s7ykifOjXH3_+ez@D7#&(+zU8RiL%6vCl z&jn|%d!zG7z#ra#8l$gmgb?8>6^7J02`UL+&|dT+hN@g}UZ`9+7A|*^g}+eq!b`3N zRk{ZpW$fQU!h?F*I6hF608nl#Kxp-)5ql`h9zna20pGEny2c=tdc2C7Gr;MSn?10w z?bV)(2=p{jwEB}~j5_u7oRG_X{ET)mIK*iUXW1w6H`rwgGPjtT*d9H^lNuk0BL_Gn zcfrIQeJaqB?FFL3cBLC*=^xp4ab@z=wgVMhABa}$j`}A=BO0uGGIyKyTkT&trZf$t zRt884j8X(0Q$CDs&3k5RbiP3TAtcvwGwc4h(2W-hsP0H!61#{Hb(?Z|-~;i+jzI@L zkZ|UdP3At2Vmg$XEaWqD({G)6)&jJU3S^lRuhSA_

CgglVp`ax2dkLHWbw&$br6 z_4DA=nzPg2G(vBj5I(V^SqJYAUmz<;|^lOGG3ZNDzG?kP?WM6N|a;@4k*!*BvL2iPU+C^l||LixU` zV8u_kk2qa4cZ%{%VzaGTj;u(*GR5Al-D1=^kbDgtB?$Ay2ycT3G zwV7MrmP^ZiLVxR;73+e_xbo;&{4ncq;Xm=gU>v^V-gFgKkPh>eQE(~!0(n#hNP<@N z-`D?lG;D(4C3g!8)J~EIbV(TTN8M6u3PI0Qcfw0n+z>cZnE6tka@B3B)dQ~$OD;&GIF#ox8rN6|kx0K2$+IG;ya{5Z9La^!hk=zBa7^?4eYfYKx znsga)M-V|3%~MEQ_f9Q7@*cd7t*U^*nZC%pl4$aY$*pXogMPc&Z~$~dAtb*I!1>LV z!-LH=ey0StJv~@zRv=H*j)ge1ZEyg|iF?U3T${A+4P++AAZ7=*0?T!!Y-L$14uzo! zm4Qt^wk%CrL!a%ua274{xr`e8C3{HHM;%}j^kiXDcKq1_r`tF)y{b~Mm%tDI(o0u$ zu29$ucr9}_afOk;_&|xnHW@-hFcCyGPwlSVh^J1LT>e5M=h?DRpUtRehbO{wRgoG$ zDpvs%TRdy zSQ>UV>5wA&ENPX+iw@YO_=Bpek}EXlPv|a71@mN@+Pv;X^%m|ZPAtU5T@`(2i0xta z*t7Y8Uxy==o^JC&d|noPWOq_3HP8vYBf$I9^x&3Md!iIyLh3=v3_>Aq1wDkKIN0A5qvRywHgd)Qp z;@dV>vJAZB42@4vg2UHQdx^W6Iv2Z4~t@i87cfGE_dcC{9!! zURyg$W!PzQchS+{M8`aDT%W6`hgE%R9`$J4;o*xVi@mOFAj4f~QDFDZdYww>U3}5l z^_1CCc(&ZA6IAoYDoiA&Dom(}SYLq%=k>#B9h2ojt~S~4)mGh&IsTUzH$Xsx#s32z zD85v)#fa&x21C%@9QDu{KI@}E!_!&T?;MCw%RivD`I<>}jeclo==GzldG#JCj79&+ zBy4uX^=HWrrB%cfP(tJOf-%Xlmf|uk(T8*A88f9C$J}6rNOEPv_dXE2nK*G_{X=lG zwYcdii$0C8u+Ij&K-PTFVaqaUqZMDuE$cWt3nmo5T&>zf zjm`NMlUh?O=l1GVEJXj&Imj)D540C*yRW3m*&WnEIOb?23ZXQB001q!0=yBY7t{|v z!8DeqTPs!~!cB6whdv`oypltI?>p$!c9&!oR05!qVQYY1gXiDoIrM=K8yvQO(om`W zY@~i$541G?aJ&gZTB$5FguWr(ybAnP-DaSo44os0${mBe1v#Sj^Yu6n(XjF0?x1Lf z!t?iXrvjCQ#iZ32qW2dWxKwE-gG>ZNnE_&g0~Pr#Cd(s1*aQ8JR2Q=&Z1yreQX@=* zLoF+;<7?Xv156g02E#fSs^+hnrMa&k8rq)iQ@-Wz4&|i(+%Z8UBxqW<_%CvXZh>car-ovcU;A(|2+`IO=hjXrG$OLUVAzo6+;VX=!k6Jxn zHQTyzYXY8sty|!}S#nzzu-4b`#Rgo}zpZ{+M~Sjt-@~mos~fRC50`|rLqE{!W{?$4 zIm{3#z>f8Q%?3q`dDgUcdh)f&Wa+u91vvVFVkOn8XN>&XcLjXN{>k2VV)}qq-Fu8A z1Ci89`iI55(F|9^`34`mIIqjHQqAFQO8hWs7(N+UIB2{^Od&u*I9(-huM68NaAJmFde^lYC-?G_e-bf#$ zl!merJm!PqN7gghRwtV)e5Y_XhQbIUqjueEm;Lc?vym7ek!PZJro(=cnV% z{~ap?A`-|^q%s5?#y}@$xwQ_2(BlW+f$Gh?wj1g@+Ln*LPxJVYE_EziS{e`2RELOq>dyQ4Xuec;1y(-7HUwAz)Ol;;1nH&`N zQ{>Ac8aQ5%|5|nTL*nab5IH4x;kxDi@;9LAq&j{)91)pEW+#ns|0#&`qOB%d7NKL}w9`jjG_TGEckOIh)<%KPn7 
zhHZ5&hBe^%D|vE#-yaBIfHNy9!86Tf9tKQmUi#i|mT2Xg_jx(Vhht232roGsfx%lj zhp4(=buY|%%4g$y=huxlym#RM=}d6xc)fSlQopm);>zVkj_!7DjO9OJ6a`s1A02hm zjwqsqUzN?VvxKrf%C~5G4%ySXC){Ww^JBy=woH?^ckk7t3cVpCv%m_y;n)=D*@KI2 zxM4niBH++q=1T>qyT3eRqsBC9RZy7H68?tle{6+(;{d5S|6P*#cq&unIL*hHv5Yv` zjCDZmi`|g_quseU7w?RZb zCY=D$X2%_exG2I#H%YJwNIN^dp?^#n*%icLaX8#{?DgfmRAHWOxW?o2>pb9z z{0kqlo>gE8eX7U%6p)DBnTKU?E7~4UeZf-BM9-9Tn%9#rT$iP(Mod)O;&ao8v>Sn` zIY8snW_F{hLe3@_D~t{MT4mWuJMtH zfKmtaPpj5Be*@m<3M=`LB5fkQZQ85nT+I^rf0%l!xTw1R4^#>1 z9vY;(yE_M@rMo+ZZX|`FJEcLC2I=kw0qG7AM7sNIp8tD3=iGXO^V@6hwbnNm7c{zL zyOf^bz~Fz|!-a|efdpjncRC3v&A`SHkuNVJTWO=%UurSC(9r;&DFlQx$-mK){Q-10 zZ^&S4>NXYK&Fs;OHuAe(PEg1z{NkFGbkyZIe_Tb&jbG@tR~rEN3qEU%i*o8G;21X| zCgg;jU<^&61PuyFtz+$m)l#V){p!odrIG6*DO4fp4B+=PTn-ls8svO`o58Mm3t*4z zW`A6EwIe-#^@oZi^>Gpr*kZQ2Ia>_zw%%Xy{&o^APnv`b zzGW`43kZn0VkwZ&A&z9k!|*X1L+Y&VQkLEY384lP!?OusbGv-?}?mJmPAQ z-smjAma9|_0$Ioo+6NDmC@{{Q&4fU(JEm3&_lE`Mm6FD%GiOb|M3=05Ag83tAnT0% z9Uyb!0ag{`sP^fH;^_4HD!r=X^q&JKh{zj;q3^m^ob|!b#y)C+kwn-s%`rPi3W0*u z{@i~n2;9z2G<2Dq1N=r7o+WNCvIo0Cn&Jb}vuAC{MX(X|0Hvr+0WO$%bs4RwVOKBG zpxT8S>8AZ(qsY2u6kkZ8mJaoHAwO5cH%WEQCJm5Tcri6d5n8bVxm&kXqm%t8t=;*` z{h8ez{LX0hg3)8MkK)8pq|BC7PhxZg3VZz`QLtjrgiGT932r}Gl4(MYK}QfSAdl(M z-ZDmV?R93>%f5n1x|WB2?D_Tzdi>CtfpNo_z&%+v-VGWROG?4%A{+V?8=waHyP&1b zwiIN$LYCfQbU^}y)ItH=>o=1(t&rETGyn=ocO*l02BT7XYfXVTJs2Q++{O5P9{>@N zN(clH5r~qM>EaB3A%0CuOzcHw?YcleJ!x$%n`76R$n^B-m#U3oQBJNi)~ z>r@_K6|oDN43qolNgEO0W*ygFb`zRvwf)%~l$iX}Z{$hfmtX#VgU&oWr_3A_%03+Y zrDuj@^{z|d?Jo2E=q!`rvq1!4(lnV_uVH6CsmT(@UJb2epUa@+62Y;$X2^slgp<)Jgj#KOMP?yUd!1qJ7LoT zx_{7A3AX)g6P9Pu38de9kpD*<&J3Ncf_MMo;bulz!)f}`d;)$5)dg< zNgW;*_FMySEl&6D<(v=Q;t9>6akowiOklzkW3`#UsHOi~O&_>~At%zW0V8YX2f>=h z3ps{VtFQ)YKSWq(hBBwt5pA%j4h)(m71U7`P(unyfAUoMg%9BmR#4R&OkUR?Jkt8} z+%XlSE3g1xARiISb58(tLDjfX zkpNsKXG*tx-p;3f0K_K}q=F^=UU81~X~4C&Ga_Jeoyl+7Q&(Rtez#?PO2p6T3~sQn zef`DbHAi~(TIB+mSkTKwIypl<`V=VO#q@Zy)v&V{ETfvwayT0*Uj#?YO}=_ zBT2XCV{9JaD59i%mlgFAg)Kk{h4a6G6$A1T*R`^bBmd=aaxuOW+6F4@=#1H{hE5oB 
zNZHNkm(ek`6+gB84hnjS4RT)Vf4ZQ99Pgu(ppu1=8ox6+9K-wPKONGCjMNS!)r5Ux zzGcdUf1z#I?4s6sj6Wgw^UODsv9G|SBS6)A^&+n1Pg1%v(>DDbXH55>%T#XY8%3uV zUhX*fcoecQ=|?V*_*27s2L{mIu2!+q3kI@|OM09g%cv##|J;+XX|Lg6{7_Av(*Npg z_#Mbb<)NbV;ZqpXNeJ-PtbAzhD!~YP#dn@B8tLP7Jk@nAkRdGg>Fp{j~&M|(KbaTUT8>=TRtGDoc+SPNdipK+)8?u)~h;`oKo<@ul1vLwz+GtA^us<$#% zJN&dUQA-I*57U;vC}IFYhQ($`(PH|4BUF6#-2g?}2Cvm0U4*G{3aU_LW~k-YG$i>-HIgk`qqnU{@S#{3KNpg9GqLSE}n`NUFwHTBgVtMrl zCo;yFrSBV2a`{>@mZyz?rX{{SXUtY+XexXIy^F$X%nZ8Rp?zZpgo9;3yx$2{9Dk}V z4Wt2#dzlW}ER6qc?xP&Q;-i#l46+)(w-~2mwLVI^5i$FNn3ACiqbpgDm{izQiS=_L zdg0705C65c)m?85>7n}Q#t@~R6yiye;Bw!_GmDyS2BlEze5WBy6R4=VO7qSdO34II zUQ=>|!lO_Sg00_bfJFGu&{meFLZpp%9dUH_YtYW|Q#OCl4z=f(jo^kAE7QCFDuTHZ z%D?ktaUnd+!_Ud^x~InsXsm!FZ@oKSSV=)s~XhLPC*sTQYh=1&U@Rx%4v6RkF_@ySqzl7{{|j02BJ zrAm5nko{MVhK9~Z0{ho!@&`Ozs#A@zs2GtJ_Z#dzlB`fG#jGeju2x50JRqW!)h<0H z&&bH4O3?R$K4cy~iii;icAa%9&?^GASfZtSE2%j`&Dy`0jERHkQ821nWk&rZK@ zhrz-;AI72>N)gq5U;_h+%FQRpVSpO2BL;_>9;ol)@4 z4ALd4N+_e+HTeaSTKBt6CJ<#NFfgF@OKjCv?w|E^F8?x)9Ik2R|H%F}LYs4jX!aXw zQX7Qazb_+5u2G%#paU12J7A~K-yP<4!GQ&yA0V%_K3o`6ptoDbpMze%c5N>3qn!@r z&lUovWK#D1o#hWdA(nFfyn9^2WY_J1ez5>Ja)KI&(8a7eSj6P#@^bPJ0%(#Hj&vIm zt>xbX62EI67%Ks4FoS3TKY%6d^pfG9{=r#5srS|Ye+MS70?X4p`6DO8OktavKIo|g z_0vN2&hAKZ2!9Pd&0!O}4CiYqs6mS>{LRV6(CpW$sW&vfA(R6>9}?Sem9-I2ZL#RN zt1#C|uUU-_UZBGlQ{o@-Y#-fMzuL64(^!#Y1-k|1xGjYT_scdntcudu5uWOyZRLb; zCF<08;k{$|3-zU9qO?x`S3_?hZ4qWx;yTD(mTLSwVe)Stub5KT)<$Ao5bvXEq(W0K zUd5Pc&zw7+u=24m5F>5xw)a%KeY=el9Ujtey^-MfbNS@!n(z5VuAY-*Z=Gio>oKpLw|QAzm;ITbp!FaW5Bzxzt=-)?;-E3|8Q6&+vvnHmD3 zq=7awP2iw|gTN&-iH;iAZGM(93H^Zx5Lx0Ysg7#4dNiQ-x&(PKw{X9|rpW<1#2TRW zRYJK82t;YdhEs5@+gg1Gq$m&oN3=Ng91^qkki=km&N6nG-g9zmRnOwXe0oM|^02fV zd6$lmV&q`(K!w@_i?Y{)6;TP3%glfxwElz*h|wcxv8RrE3j6@D zp_VHpj=izFck5FqK)E)0yyWC&8xcxD#JN%3&3FVfxpS~P_Q~b^2B)!&TLzhhk(vRx z?qR|?4IJRUrhUD0ASM4k&!xnVdjk~973}USGWo(^ne`2J`0W} zH!QezL)t%dyn}0K1VJyPzWk#}#!Si;eom@AMwKZ5z7e!rvM;mQ$9s+~7rc}{>$teT zWnJyq{P8;s4`-4(x6_SCsOn$$KdO4HAEJn-+{xW&{jI%nMSm>9f}@u$R3tGR 
znee07$*{26Sfp0)h#zhABJR5+u9eg-cw2j!QtQ32cTu`|69Ea%7NSp$1Sl4m5BSMl zmMhrfupV63VMz3glOR>f%HsmVyA^ZYXhe<AIv?5*>*pH*Zq3L!+PP%r+8IYs1|P!)mwV{&gRuVv`zwQ-Rd3g7u|zHa#!W#s zR(@9hgqE6p$Q)NchZ!=4n@(z{I zr2`sDm2d_njsat8HJp^qO_Q+jaMQi<%x1vibfkR)sDfj_A7yR?CNBqps7$no2*`CU@g{@3gFZg*Pq1>b z$}0gV<*>NcY8gc7qt*w?m9lyF*V>v~&X${;$zH1#Q1DqddqNRzZ#J6mCbQrMGUT-* z0B=q-s)!n_Gzpe5Y~M_~zsdH5WC{ z4C#<=L0tn-8t87gg6k6@~t`gf3qahYDrY?h$x&hSz%{j zolPeht;{+`lWQQ(Dv2foCga+QX%yU`M%j9gpVdupeb^o`Cez0KQUv{&X(Npc{nZ)hK2dYzn7BL%mRDqfSp0Z`q8iv6W zkICRksqf|!b&1W8Bi&^+?E&2oNiX)+@M)14>H%O#Qc0YL!^~>qDkmgHZ#T-B;KHTV zeDK-ar=Z(?Vlqv*q>$Zvij_4VWs8U6hNSvcwSz3nhhO6$Is2xtYcFCQ=|t*qu4{j? z5AYKX_8)A-lgywT0A7QuKpm1{@K?7YYtKgr=D*NQdXZZ0`5YsvPJC-x7=kh24?8@t zC${RIkkm@d%P_FFzLDgMxc6V)DtlI3Tc3O4PS-pVv-2;2skRPIIMq}PYj?s<)Iny* z27t9~#mB6CVgFl9>&YCo&M!|PZ)(!O4*c%>n5@Q~F|Wo!X2XE{O9>!q^lZ&*qu{j` zI234oY?G1bBVHrUH~los@U3wTS(ZU@^|ya)=nKaGkE0Jqu4ZFNr16P1WL=n?$2Au-kGOMEY*aLMGaVP$Jq=U z1*?vL@72BGelSB6SUsaKV|P%~lyO!T1=oyQhe8$^JPk~9OTGZ|srV>~Tqb)Lk>pw+ zjl^)<^{t{Dc2U6hl@hp>uQ?3!Rp|>APit{9;aY^irf6$Zxz*x>R=STeGO|kL_S*OX z%2xf5fTLIPV``3Gr@wA##wx?lo0WB=%|R&o!pv)XfZXPZ0{l&+ncwr$*;2`}DkdJr zAeW<0L48{n04%bj9X8oAv2=|n$o}^hvjde=qO1ys7sim(eD?XUG?1R0&C~5k*9%;8 zTv{s7pJJ+XQ0kr=1BOro_!aF-n;-rQ>0*2kuk3cUQX;^+Vt1WH_Rkq6Hn$65*K46l zZb^*38LrS0g&3QFlxsS)(4q7s7v1?f2v(EoyUF?b2*2wN z;_=IG|M5>QExx<$W10pVvzbEH>~=%+{(KigyCex3#hM;8hY4e+}wImF)o+ zl-`Cl!(|?aDmN{?!cRBfY>H4+m_eG4YwN5&sFgA@l2@b`EBQ%^mJ8)+pb}?%+j=t5 zAGE>buihq>+;EGO;uIeFoMjhvYm%pjJ- zQKffzh0c|ssW5{Y$xFur`e|Iq_CNvZW_O+CnHBhMwlA;8#t^gq!$Z(BzSqeh$p9w2_`>gfW=1|0lc+p zUzJIL9DO*y8^PQq;LH|-G%_-~p|@>%AihdZ9BID62(_&gXMW_stPet#UXXn|d{NGM z3WS7?i)@hu z>SJ1I5{oN@J7&IFAV30C+!d+7nLoU+fn+U4JJE#V{V!uwk>$P4^){COfTQr|-!ik7 zO0_3hhpYm6Sha9`=+T10lxf{-M@d7QwLgC9XqEj^Z@Frv36~CQ2uP`wwD?Zd{Li9; zOeLz~u&*#EW`#n)l(yH#aU8xzc?i&TzzIO)?(Ncnj44s{-8%bz|Hagsk)Pj0;5% zzdqki0LADDeDJ8xw8@Nzz&nRpQGd!Uk=XNeI8`Y(0;b8ELvgu5Z2(Z0k5xP}H=toq z5>g3mS})+xm6`J|CGpU@9l!f`wI21Jq~mK_(E31d^h52LG&OI||C#uUYeKvqQjgRC 
zkDBlw$>0`#sw$}1joR|ls~F*)%RaSpLFnX83UG4wis3BIO@O9p3j3^OLPAk=G4zC zCIc^Jkv;-in=>7X84#OyM66Eq$QiQs8myms4s88OWm;^1*W7Jet>~`OJye}YUHqOn z>lwyK_~u2sW*_-ZnU}#)NJRvt6|c@@jS1|+<)}{nEd=n9QirF3TKx!5b$%j0e0owa z#{71Pl&G+Q>7xf*B}5qII7A8AHKAtxSF3k?;uzZ>%97YD<^<=Iiq;}#asI){|laz-8YINh0Uk8!lmEh8fQgE22dOMcf67xBk06KL7c9TyvzChqC z9+oyBj0PSgaAMGC;WR%w#EU_!$+tv8>AVA?;CXOMK1g(9;8{48x6xNky27BP; z($aH^oI_`sRQ=7)tes>anjbsRjEY< z%4?_HU&U}I^w-N2QDM*gIwxa*U+xLBjXE>KwUy%AVpZ|~+ul+j>V));*1%*FNo4W{+;%_y9fW)eh2TOB8B~@A zN-G_`dM*{8g7$h;g1EKrE30usl9Y;m4ZSl4`pC8%{#a$1YWROl(aN|5xH9VZ`b|y) zlV60247!2F<6sJ@h%{5u3e1)I0bj|gZkU%#L18jHeGhACitUh zebsMC@MH|dyOqYw-qpeIEkt?=$J!+q>9n4Ve2fEwy53oUr>3l<Mw%DQ#1LBi0=(=2p@S(5?25QJjKLB-y z0pG_vC3`T0H#=ix>+eI;K;bS><@KZ$W_EMnc)PRZxMJAC`l_j%$mBV5T^yr4DqmRn zT4@+{@g1<9X_$3@yfkxPPBwy0FVCn}+ad3TFskLc!h41nM5z;1Ny}jKqnPReX?=bTpUR*Oj8_7_S)-&*k5pnS0$ZjE86c7QD1>L#QpUAbwR$P?!)5%U068_Nxj9v?PYA ze*fuB30ve@E)9Pveeq8Cy@IjEQiyS9O}_J6l+sS?p2sF;o%0Cz=#3vH2{U+|tkyk; z8%)&IQFRz#A`F(-$kV01XB1Q49j8x?XFlSjLb7K4 zMoquzs%1$EhR!E#JMeNe_08R=s8{`X_&~}T_F&D5sr>9O;gk5yP__+fRT_Z|{^ry(1@w`eWx+4Z ztjLeLWx|8ceuT)n&H(<~*8VMS+5e zjlMs$%F37#Zs&7#h1TPH8D@ec;~qIYIHuwvWm_gNRvL)ZOwpBdKY&S~rThoJp?YCN zwLT@lF)MWR%I=vRg?TN+bxz~4&ERKYCfx+_n@??N7dl2M)#t4}zY>i^2p4oM28#4CWV2uB2yzMuO zFDXxRJ)RVZS>Zu{El#zEHJ^XmIA?#cg*`&QSiUV^i;(uC}lt(?b8;R!JMC! 
zOg%cv01`SRa|gxFtC182j{;gh8O(O~vu(~w+Husu55~*nzScCgI3Pm5bL*Zrk7uq{ zSR^Va3b%fEKQvmrJMN<4TT#LV1L7O9qqY6u`L93~WWm5W-nK9`-4k7d^xfp!;d$D6&BWag zW!HkZ*ns$Ku$bM&2s%zHTauoc_?M*&aF(sNh`=<4HeeFglwdr^c~an}uAA?hcZ_%~w3vWvYC%^OfmnqmkiZj~ z%tVJ_BDHUTN}@$Pw~X68>sX)JA5|G>PO#Dv%c^RZ1q2V%r*qjiCmZ!ii9Jp4(*{dI zID;1af85vOBFC|m^+!P+bgv7jbq;JxH1AW}Xdk(o16~@j88n)dWv0P0j?Z|OX>3l5 z2r-9^BRe`zK4;PxT;ukk3z1NVQ-fu}AztQsR;%3kJg)G+wzs!jhWQ_V{rn@6EPi+9 zxaOY7srTZwO-vLRpw>6)PoyPc&lqVv5j7>P4A;{)T}|0zSgaLnMkvs7=JbZuYT89G zI|4S|cCl<@GbjKobt*)(oHDDb6eOPVy=W_7P2rbP=(cfbpS|%{sW?yDXX25ud8w&h zRJkl=f{C?6ob0}XbBgh4JN4c(w}vwU=5=@chgyQYe>tICe3NC|%yQK3qM*`77oE+z z*)g-L+PTFae4r~pyT2sc%GL7-@pG0v@PBs@_ej5%CSxch)NJ^qDc}5UymZAA#hK4QIyW?>s|1zcfUPf-m^jCm^kRQme56^VI zn73@Y^o`N42c^c_`&iQvX4RNq^z$Aby3_D9fy}`}2x9NsPN;kMxzCcNa;0PQ?8}y$ zcSCOU*4=zC_Bs~loi(fs@U0TDF)#T;x|re|d%i%9OnMQDz~99gDo|2XMCX3ig+#m^ z;Euug$=;A30bjizB8^YeMz-KWlN>?c!B!TT)$j(KjW?V_=>3;r^NZJW`5OBNO1h8) z>dW#EF8JRx^9d4hpvlQTPZ09-2oV*V#pmLPiI>@$Jwi05ON8$Bbfp347ezZ8O^Uz3;4{$;Y>FPq<+l z*UzQWP%3OLGGrO#FE+VdJP49!Tc_d{PTwe4HbPeJW4RIHgz4xl#1`Ek7G}p0W1K{0QCjM4O+_V^{@Z%$BzRlQBg5e3@!S*2^ zHx&OkIflVV7IG&h_o8-X2%;mrw~AczN-_b_0kpkLwPr`npWgQu#vWpSWj+N?dGL!t zA%>?$-;9Z$*Eq@Z$9532E(ID%!QPen>Kg3|^Yf=NJZKWXxnt-HO_#HY_Vy(`Mo@?v zf6u$nma}S21Z*WX@E54%2tRSFmLRj6hJ35m$pTh31$bXObCXrQT-;R6-y&SxVAu?s zR^MH%?l%Kkw)@KI4{-56UjwOr#pm0HqBqPAIz$@8>D|MnGSe^FO=p^i_@@TiLp-no z@AkThcMQa8zffiL_9rI3t0c4_>WIczD{kkw`M7hoKa{33L>FiNuN#|Db;kYYrZv^T z7}4HiUb81m*U1Cv2J`daS+D65N5w&X@- zvcskg`rB*K61dLSnSw?KW7Hpevo7)R+s%%wEb7B1Y`&AB$*GA+*P)yuPS~$FL=az+ z9=t*dtE$sS%_ogr;{+s(8uI@GYoLf0th;>QfVQyNDrL8s7SE_VCCLiR*bVbcaU}?lbf{ zig7E4>|igt4*PqNT`~p4b&?5^(ttaUT=jd+1AsW3)21Alcm81s(yC1VG05f@s75)UnMryYFUB^$jYg+Do zpSeW)Um}+A>6xp3U;n)*Hy_x-bt0=A%&N3*{JzCwOm^35>pjsgSeWZ%g)n2|B_+wMt0fzWITq7cUH;&)eu-MVk!R_r<+eA`o`Ym zC%Lkch0?@oGPk9wHd4;V3!@oUkbDt1HR`I~eERyP55|p&MZ1hWpXQ zZ_OmZNA7PBaeeQK$wvBSVP3af(Ffb0HBb$80gzbB0ws2?o-y`s#n=e?- zv3pTcl^s%nQy{@+QjIZAuGjkgLcrajTjp_Dssoo&9{CU{{!5|HpEf#R 
z7py<@uA`&Xp6;Yiw>iA?KZ_zmo?AGB(6mXTMNpuJE+!G-q0es-Q{JOs3LQ=+CL^Mxo4E&mrsi)63sq%N0Cv5Z_C1DdAc4GtuCGy`my59Aw(6%>kL>AZ3 zhKFBNtUxUdyvRXYep0o5a`bU8#L#GBDTsLhy;C)c%fBN8^Qzh9%T|~E>HaTp zGQZDh_oFtPguIhz zOu^Tk>G-p=!XZpa)iWQ1vQD0OkJa>4<Rar69Gp^TDQG7r!=a{iR<+l6z-1bu6v4`cQ znBtD$#dW`I1)RjG(*FkjK36b!IPZ_y%gu&~?;QTmClNH3(X1H2qUxA#^%5y1)LV1e zt$wMq1PdhK54iQ^NtKH35mv_{WG~Sx=aO9u+l^NS-wFLG&#^95qvoSj&Dr^xDn-x% z;r>2VUL~#Y!EcG<@Pv<&p#p5W)qW;W z2QI{wDO#gXl_IrxWD|GtQOOy6*N=|5oFt992@E~OP3ddX?exl&RKberm&zW9h!_ZX zto=o`=4UltY^(S8OgW4X=#4;gOc2NiRSa(|BzR!9@M~c87MwqoLh(?}wg8qoBi;7{ z%J6M!CVSlRBgZ3ped40`6~stJ_`oa6BU_0vJ4@@76?q-hFhu7jrsg459T%uMaU~gi zj1G}B?~n@(M`O1f19_-?u>EJG?O-j{te@j$?q^1&6+(VXOR2glZ-+~tXsx_CzTKj; z^g`_a@*Cy0IV%FY;ltqXYvYbj;ZmIvi-l@Bb*AXD;qj7dd57Cawa!!v-3GiH+~+SB zsFR`HWU$i#K3)z29Wo_qr$?5q$=~6i;jrnd!LDc5GO0KlbU!fG2_9rCE^Nn=?B6+r zwqYD^KUQ93G}-5BTr^NGGLv!&?nFpAO^u`LKV!A_-MIyPSQ6G%3QO17zSQvR8k=uX zQQkTE`JnqL@G0IvjAUiXVW45Soa38;9gNtzSUM}b)dUP+t2|x{jw21OGCZr%RFn>U zq+m^DfJ#}yo;nXSVe0*Mta$uK8aB@mJYk$Rl=yD0rc8}6<(9VTn@yG)&Y@?mx$eEC z#Pil&s=O(6_>qFSAko9kOsFD<+scR3!qEqv#50)$dsWsZBy9T~>aU>-=vv8!b-7a- zs^4ndD%DMQu;?Qo{VihXkHOhDPTLYcigl1A?PUJ9h4vn^=v+yeqn2I3zz|yx%ua9% zKYtl%COLJ~`sMHpqjyao%gaSvc>LJv?KYl$O159E-sTX6vsc!IeGX+*ezAQ}VeHD@ z4uk(TqVye$Udy=|hf$UOO-9Jg>|~zmqudZo{%L#OI z76#j`F8F9hDySH^rB0W)p14Rt(iq~~CXf2B+7>3dc()dD>F58OveWE?-mz$W!T2KO zt*I@-KhdT>YUS%=_`oOJ{b|@TZHzKK+%na-Wd?+0hQ#|b@bSJ+`O>2u)x5kU?oJo;hYSR}qNvO}h6+2PQ}|6uBfoZkYyx7Ew`0|uGv48F+1i&?+?@7S`L8*7Hbk08=W|+l-^rsC1zhJ1LRKv z9meTya`(j)ERCXMR}0KqYbGV*)BW2bIaskLxUlfdh@cK%-1x#mOwuRSE5qw8{-Z?1 z9Yq{j-@EOwG=phu{Xw^!1bhVamZF=MouS|_LctT+|7}CObf%XlLla_!>r+=LF41=r z=wYMLRL0WLV)`b}Rd@V8PsMjDz=q}+DGs3m?u!nq#8BTSn6nS&Xqf`%NvT{(RiWVG z+a{Ixp@&;0NTK?h7=(<}>9@n{OhTh7jPU|1du9G7#r#fSnmI%e3PcitvVbd4qu)i$ znzXM1wvI7z=C)~Y(bX#YPJb0a!|X3}Sy4aI=c>l5Yy-12^vrN7tEqL88`}HuD-8xt zaOTEPmH5`lrjKRbRTzk^{?0==6!pu~sl|-}{VLlVi6aFw{qSD#U>Nb^Rq^=>@zM|C zgE6JQ{qH_;T7IhT$e(YeUPXsU@D>-3T!Jt=2M6*z6k5B(vEGyMzikCban?WhiIf?n^tpuhp4i>!= 
zbptX8{g8m5LP)mq5&>Oqq;@N&tPJ@J71eK=_d~HTH2T#JWx^$#&&w~`byI$3XwmWI zlU3iL*8+7J_fMVls@@;=D71avD=4vqTYgU4S`YP8MgqaM#nq9zwe(qj4tuWJt@{&_ zC)Fo|#u0Gno;sSLvrUo*; zB9=j>V$l5l=IH@eG3^ZkMwWV?PM29kKx#l*KzabbvR}WoGU8|oC-kqd)J6XMR71_y zuXh*Iv~Q_g&mT^@n>Jtupl|Okvj5vikz$Iab7IBEXjta@TLs=_{fULjpF?^0 zTQum!*&|V1N~->bhdD^A558VL3XA?=CR1ABn|gN`Qg@YM_wO-UNbd;A1VP`M`ja}0 z6$cFcz0J;)8Ci(BtuM``@tX0RBMl*yt0aYb8D;_#egaMO2*8c6rgh6B7k#pitCq;C z5mhK&1V!+MnYgYY0H=))ucwWVEUJ<)i4jshg4p!jg0Sc$Z=oO+9~07ce@u!)sndfT zmAS`vf154{97wbIvgdrTf|G;?O&jxwPC<54-(u0|uwSf%C(nE|#4M@9ap)UoPVGq1 z4Ag*#weBCOrr2ajptywFa#>O|1XeL7vyzM|8wUQHgTAI?q`4`t+XZdZ= z@xv_7g@oh6fJ>XF_*QSXdHmBtX9R?_2|{*!NbO(HU&bf5zat;e7&nkM(oP?e&^mo0 z7}6$R31fNnteQehgxIy$T4Pf#=CJRu==uh=-Zktf^DAPC{*5z3VsD}q|8|VFKp+xF z`2{6|(TF*rueErUXv^u{_*Ct)_coo~rCqS(PC!v;#J~H1-8nrXqO|!_IZ-t0x-4mh zn4KJvHgGXFu&uAziQd0NCT|aTy3i)|JFX5%Q8Ao6-?Cac<$XgQF;q`(GCHx(3$(+g zB;f)#(=AlB+4;@;hxHAROq5EojzI7A zxCR_lA_jK9qHo91%EJg-kJ>pwl_J65k3c~eQwy`=2J^5OzZlWD^Cu-bzRV!U-Y7SJ z;dI~wKG{G=OvS*$({7|8HC0IT>?7NE$6V^0BA15FEq9L`yW(s{vfI}hDy-|3ATaJC zS*6jKi3NsOj1bs@|6SSOHsEOm?Y?U|Zs|K++=&%Pj=5~I&<|wG!8FKY zZ~t3T5D!jm{A+>~V33gR&(@Wl%KDb*UvE$fxQ<}V zylsIj(<*xKYWngQ{$tK_`JE~Tfr8Agaa>biB1a|*r(s|`wA=i&1n{EtZMRd;t8H3i zQ^KttB~ZKH?(FjNVDyuL^AISN+x*JK3l2g;q|(IJq@^lHiF$LVUD&?3!3>D6#mgQX zZS9AH&0-zx*=DvQhM{BC$vpO-FvPfV2fivj7`G7(daMox04*-y{U>odH8={7!VM2N zuf)IbqiRnQ+`2)SON!rW{PTw{-hAcv;n~&XeyAjD%#F%6cZy9^=>D9~;-;y5vAkn{ zHXRe6Q0c`>qwdd|v|R)PoRm2P&PFUNSTY&{w36&mwtomsmEhluxgT*`*cWW|*%77v znRTJ-i1-|chwiR`0`g15jx(4CjAGlfYLEj?{tT5G_E60dsd0b-@knHyJG8 zkP*0mwJHOgmYL-{Tn=IOPIBY7lcTfWPs&N z^|q)0{2Pi#3;t(;dyI%GGHBCam*oXtadFGv+a3Op|E*_y9bd!zU5BczqIjN)8W8>S zv$*&t+rc}>Yf>Fx(b>CkO6I7JE%H(2lAKf9JsSWYkvswt{x_&(PK+TzX9F*k412@i zUl(^SN1@^*httT}+7nt&OdM%HL2P}i9>$n(JzTG#0|0dv@ZbG8727}a!T?~Fs~1X? 
z#__0MpDB}}CZqEV3OO!tEPepaEc&?3)ayP++;Zqu&6LP#wL<{PQr++B+G0us;UlFq z49ypgUNR2PFhyfCa2#0-t^STw%|S&b4qLvkFPa%wP{`RONtxYqmIvbmo2}F}OrM%b zp`kI^=-m8}f#MHFgM97w4=kUI~7#^3sITT3SHVn zGimklSJc~4YT10boz;fFy!Ees6o=f6wcp?rR2+Dgq5nHDPBY1I_*_{0dT9|oj~{lv zFvS_VebaP->2=)$8lrGBsq)=i!j*OZ(9V%$cD2X1^2y;lYvIU=QqAviybCpZzmNc>O?WAVcExSHjnXZ_c#YK`S`pnnPrNd`|&R z$E#_Os)!>i?;UTe6@1ElMZXV2LO(9%F8Qe+S&A=r&dtNS3bEu zW8&?Wgg*|?yk0~n)tw)svA?cWj%&whU}8uAZvg8ke$yu>fo1$Hl;^0E=D+~PfIymw z2zki7>5gbE2`9oHqufvLpZ#D%_PDVXN`}WuJm)~o>Fc~Gw&gv8_jwbysPR`mA12OB zeO`b3Wc1qs@PehGnw=Sgu+XJ0jq=R3C&>ty!ENqrsO7!4Xoph1YEA~71M^3XzOp9n ze`T1T%6CGsIs`%SI&RzKu4XQ`GZZoULPR`YwdK|Wr)V9htNSyu;S_*AB-7)`7KI#X z(MWDG!|kn(Xf~Zi?UdL+gkZxwS(9jHM2Dn#i+k6oNfmOF{b+?8i=(``J!iMCcX})e zIi5|N#sbO`^31@MHxdzq05jmS>9;TYIb+bW-h4a&n={m(iR;!MHwH_ z(tO>|u6>2k#-=5_Y7nrY?3Cl^WgJa_Bq{m7_ZA{JFqXUH6j-B?b9;3z)pGFrV?hJe zHopb>_#B4dPEWn}i&@NyFgm~{$VR0QP*_dTkH&bO!X@@@^lVrAQPXhms;l;h{%pU5^#n`~^WmWhF*DEQyK zrJo_Gsq>Bqy{+RHU47^yaF0~=mNfmLKM$NM9r=RV0{;8eh8V5HH%?{Snx&@P&y}@V z_C>#bssA;-=qh-TN(051GanNQ2Oo?yd1wOB{~mG_*Lcl9pA4aM@0kqhIEL?fh7@lY zJ!E@4R-gdGz)NJGf9RdeXw)IyN42{RxtBrX&(G>Z=Ud|#iC#8FH0owg@1=fB|3l*I zYmR;3UuG+?@~^(5IQWjRb3pv{Z0y+h=l#??J$s$_znwY-xGFI+Cr)QMMiIpIVR5U&CcdRXKuo&~6{RZVl zb@_LJWRc07IPn#Oc#94FbT zX)!iie@Bb~zPM?<54PbC>pQ@VTOgI(d0eb579^Mb{-ub-S%nMTXS^U@pUv?o$S^kD&zs_0jY8{HZa#YKULc7-%dR^ZNWDPKKs2t*)Ow10@pX6KDNXyH1;x zFaw%w%1PPwUY8F^>v^Ht2R9`Y_W@B1F=>5<#5bl=Wv2p`SYT7?z`1G7?=PY=FF0_< za<3575zIhU(J(Qa^We2>*MzgiU0RfLb`H%X>_Qr>%Fc#zKQJ$#fdjY@T${AcmTF|D zgw~o&!CV@y;d=yCMK{TH#11`(RY^k)O)YI#hkU#GJCXLRS1|=ROlt3!vi~2h-YTq) zCfeHlLLf+R7Vhru?oM!bcXubaLvR8FcP9`axVyW%JHh=l$$s|!pL5F%-PNnAro3ZJ zGnFB1ae@d=epi|ju;BLPLWW!WJdVwsxTq$D8B)}wp}KOEwIBDD$dpIVWQBf>!z1$^ zx3b?a9Pf(comN6qTWrBp@nL(@UUH-|wC8_tsR{4L$gcT+eqx||+D&!l*5h@ob)A)+#i%44Fsp=rt<&7S>z=2ju+7v#)KBJ|&U9dWPV=P2-n~ z#;KP?L*6R|MM-@_#MDWRrf;bKw@ndDf8wEZOhZr{$x);;I_{OPti$lsw_r`g8W1 zDT*V5#0M>iPPcz8s8j|bO7GMOu@Hzn^SA$u%}XMY#vJrrjAW}KA>Zs+y}39Lk8jeO 
zYB^}-N9=50nvdWg^j8V>in({1BqVKN#owC&mic8DD4S5+cxcnIk+*bR8@~coa3Mq^ zx{-=cII(Vts8!*U%81E7iGhdb25xn~FH;0Y1ZD)5FI`U0A*8NhrR?TA$#SHnR4*$S zCn+kkzo^PDLFi?&Kz&e`kK((=hG2ydLgnCiY}*xDBTI9Kx1vI1BG|*zg09VDJM=|p zRO(`xt6!3_P&@_--#?5QNzyaNum!{5I+?QYsA0*oX0veHKFQ;Uz~qy=eweSGWyPCs zgn^dNz?3^Y%~IApL)C7#8vOR$i`JeUD){<)nHS05FrBtpc=9?_090H>=tT`@L4h&S zXYzJ_9qND!XVU~}xJBwIuk9fG@6sRipN)zh|0S&~s8oNGqgmd-1jt_$?^vs!`39BlJBh*+6S#Bz3$B)Nma!sAM!_ysMB&ES8ATO1>L{Xz z=?{*v)Fv3Vmj!U%<{uy@b_a%`_)%j%SV2<6ZA}nv3k$hP9b`;MUb39=w^WG?#g&Oy zA6}E)FceQ1H5Z=C8+<{TiF{ z&;48X7W;A>ZdVzldXf}tWb!L$znSJWtJ{kQP{-Z8=L4IgW7O1i5Z|&tZ~4pR-L7b= z`;f2>7LsJGLZKKdbgvn; zff5#L23oEvlYreGc<21Ty|J)o+4Jdnxs?HQl1wgdOm|_L(YEW|Y-T zZPjz-;eOUoxnpu4)i393!a}#xV|hcJ-!8X@!odF;w@F;HeFCrD?zQ@Cv4838Z&>%X z9f&8M10B4gdxAZU*Mb)%kwzOjqhrd^g!4nfttAud8B{t}9FSuWIGc?`Ihq!jQ;dTg z7FF@(#*QtPkg(6lB~W0K(>YkU^0AwxmH&dh&&`h|>WL&!z~G0@>&Ej?M#*m{iWAQ7 z`}H>N7-$jPR>BUnJRo#-pc&`pdbnKKd=~tb*f3(}3B02{phcT0@2AF_zBCy`*FoBxUm%t}`R(HgD-v@tWA{%LoV!cIUK%{89 z&9^eIu{FcRfkita4H0o^n!)<1s4}bT>~k}cFOxPW-uud>5~;fR>a;K(!I3H+;Fz5z zq0%GxVd{;%2!$uykk3&&bTwhP<=_yL2YsBLYkS(}CT(-~hVET#Pf#?i^9zS2W$^4a zPZ5_b54S)djyI(*=+^;q?@O`!AJFIeU3<_{87nO*-mK)eQ0w>+Si&FUntz8xl?&In zz(O;!b6(S-P-IZpN37u=Kyec*CrDtUwYoEllcD-pXm1Y{Gelpuzo;6oUC z-3SEDvSOylInOs75|RE6!!&g_sYa%xitlo!7dtZtQG+h`j(Z;6IV~<1z8KX2coXY* zGQs`-Y%}f*Zb%xfkg|_no9kHq_Z2g>soIawCjXYoavi)Y6cu0d!#3E)djI;ZaEh4! zl&;?e`LuoaU^J2ir~TnqEXwlTt>j|12CN}D`q-NaDX3v-t}* zUAS7kS%x#41c!(7*c%(AYad05_m!K=zktB3m+*Y0JWQIb%I>U! zt9F0e0#z*{2kpGWEx?f-+U%Be(b!I=3?0X;=i{HJB2D=Yx1=MJudw@H*s$Odu)amm zDJBN?KvtNL^s3L{PYtpf7&#h=Xx=BM<#9S(X8~9RC3&dF$Xn(kdMF*fqt{MPhsNf& z0z0%F?05t?U)$`xC`k%RQu(Mvl-HO05p%zYgr59r(nR(7z*lS|99}nZ#c~|2JfC~5 z6--7c3UPSbzMzdN`DPX0ozri2a^+5lUa*a0(5O>m-~icIu|^9q^3E+^LB{`Bu@ ziIHjK%YzCl)(>E|H!#{yUPLidwokg-N7walxVNA{#E!X<2`J-_)DBG9sUc{ntSn{5 zOmqmv<#yAeQc|Ld>Iq$49%APfdGpa?;g_V}3e=J@;Ls~<7zazWc&d(IRpTKx`sxHz1! 
zxWZQ)#~-`Lmb`rjAKsQojn& zxMX?D<9y}1PZ5y~QV#vU<$%YXIv;BWI_Dx{zzKpPY49cDvU)n@_ZZ4Luoz=7pzTpY z{IMuDl0>8V7z53oIt&1X1YVy)*70^U+_JXRVD-{vPjAzsku=_rV^=J{XNiXcsEO>) zsbp|m)0@&@hq#5d*pxy@L`l?GL)KSBx5Z;@lcc|gitxi+8?_!;IrlqT?mi>O1N=n% zKuISrYB;J|KA$5ZzgS^d?0B9qvhMqy<*_;tla9mnzJ>H6Q9N^7%fax8dEMVdzxJ`! zKWf98`a3e#hEx}Qq11^=lTY=&^8a^%rGe1Kiddowfsu_(Lr-|I;cvFQ&8YjfKN1GQ zza1g|1(e)BIY-^!CL=IOCk#juZ6X#;PB!{JQMqO=gs?u{KpqMv0DHZ1J^VO~i&`Fb zyR8NXfm|WxUSkyrVIKGLvWeJsyIIHLH{`z9A-;6p{!r$Ek8i_+nmb+JjXra{VQNKK zM1=%2t~fW+N4eMx3?6Tc;DG_`d#gLcL@X_)OJLwzXtb+DsX{i(Lq#|f-SSfC?R9mL zf{iRItRB1e?2kpGZhwXuEZq+tnuX$px4!kUy~&sNii+77M7^c$y}#w$O9YU>3EBRI zGUFZYPS!lnu-jbW86;pj83a@?+_BbzDV!CTn-cz`9*$5T*|UMG zp+P*Gy`Rks9Lg7|UyKqteml%xiO(}uhH}J#x=%!v8^?6>d&HXEJ$HIGCiBcm6<_)4 zkWI)^*f=}t!PAJH5(Es=h=QSM+Y&`(8>QkD-3(UU$+cIT?QkFAibEp4I1$J#U%x-~ z@56i8NMiZMwI(|(Q^NQYXpo)sCG!71dS?*>5ji`((f;NrMzb@8)V1&D@99<&%S*=E zbd%)&8L55h56>q_5n#+V!l72w-?k&^Z7BdKda)-AN?~W)!yPKf39ekBS`! z441)SVZO-#MfuXP04{gS1*L%*`h-y*+R;b%stSbO`tgr7Bvm1pRptjOz(5dui(+{l z;nq@0Jra7T0H^oD4v{=~R##ZY!B+loxvEqK&mtxHRQ>F?!ob1CXJc|dQbmAi7^Ico z5`nRPj?Pal)>?7i=J!V|m}4-~V&x$OW}5%!MFPNf*Lcep zYhdx=sZ1g=swd{AIw#*7(5Yv=X`sLzG>T8ha+Ur2SA61?g1^7z*nLX(qh{<|N?=MD z-Uvo@4O`R9qqbepOkHFDD%F`d$AK)uo86 z?aBmqpf7Kg79_wa{OunH5RiQSw|_c|-0u+^ogm8^=mj4P&dU<%#J-f{Vxme;Hq^Wm zc%Ohv(KTb)KW3OK_n;>iXu>`Mm3q4}-kMOxOZ?Ae-*vm$zZO*p*`f z(82Jq?l8Ky9y3$-Gi=4GI0ATNbq8~ZrpkkeKhs0hafUCP$5`Y>U?LGHZ?dn23PbLG zxrf6VBSZzd>Hh_dMUn&UI_$C6$EidnvunE)%s*M` z2wt2hDdg91p2!|6&&tTYw>a-3;~l;!4vC_a6SA86`4`*t#}~34WIz#3X}+3 z&m=tj4Xf|AbgvOkV-ZZfzwV3k008?>0-N(Xwzqm(<`6HNYb|1HzS_m-id%<7lyR38 zQxU}zF{qd$SM>&AGOdbbtX0>{wUFbuHhh1^rC`82p%WgN3g`6l9P_2E^&Y#0=h4I% z|J&-``X6Jb+a?A07l{{7>pZ`)WMMm-@@|8=FiJZX^nzaw6c?gi=$leIs4iJdK&eGW za7X&nUrwc5@+XjcK`Z@^8VE0J76;NDMZhu{6}+?J-v~Zh7>huK?s*F|-)m_)D zW)E3Td+5yX3mB&Et5he?<*7Vn9|@`DFKWz`*yzvK2AkTDgC%cv3C z?cye@g@d&GmXk42_C}ZZ-bp5QzcL@--?nDB10fM{yios=X^{R;+ihoTlb?T@d>FV8 z*WZxyMIx$<>OMnAcz267TN(~9vyX4w`LyS){&fAjvoo#7blKr?J*|xL6XVeG5S(yD 
zBcj*8AsP+M{plVC$Se?E-)A(eb0wDTx8I{_Bt`Bu7@0R@KtiDj=vY8!-98Y4_SFAQ z7D7&amYv7d;~|T^bf23!9ri*&2Hb!Wq|bLWJ#yTBT_?D;v+Wr@7gfT3o9B6c{U>%d zuInP{33&Y}ZxSCHDuD6AW!EJPQQ>r74zI z_%66KpYYpUIV_)*KL+=q3uWebZ2hqFk4xAW%Ir3~;_FVVP*Tq1SzbmOh$_a`!euyr*-Q8%gX&vGm8jyu*C~xw`=VSoc$S$NGhH^H{K5wd2;@rf$I0|t8dXf3^84RnT zV9mSn$(D6>_eA7#s({j{PIn5c{p3M3JTa(?{RTuyjZo1e&deX+#}7mR|F=GC`JWFS z13$*yX~Ai5#HYBt)v@Tv@0%BF-aKMis?IU&(ZYRP0}yi?pL9Y+>*wVAk=c09~*QcfGbo_SjGuQ`$r|>!qRKvLc&GIPsm1n>FV&j!-LZt{2z|hVEvKz zKiJxEIsuN#(B5~rh)A1t0-M!&zd!AC?K|c;t!zi7Eb#3hY8{ zq{Y(?`N|~^ewUI16m5v3G3`?*@h~q5`}_33%(v^Fv(Mr5rm9!DFq z+g$ZA`6mn5A+j%2+3MTZ1E@P@M`m+{cQw~GtUJ%L=)`)K=JJBwjqEHZV+DlIl5iDk z)9rfbl&?`83-`;Hi$;NX!g0`|69yWPtf0m+;3$H|OB9OOf#}SeXoc$32@^nC2}c4L z{7sP{5V?%_&s|uOz9{w@7ql4#X6`DEMxk&}5rkA<)H|vdjV0jTI1NU+|22bx<0nDe z(+f8sNMgXRIL=wfl`zm53Q@u~RNUfFM4aA=QV8Uj+@35GPjcFq+36ve1OmvQw{eAp za(*KN;Oz3tWTm9ZxN|)axU%6FB_j5&iuJ(-1XW?P>=}$P6Jl|SM!s-CS+MMWUR|3H zYrd{t0ZJo0qfy5TT=9fa^RzAQD~gpIeU^zfTvl#VF|sXQoUT11N)3!$FEdCZ9#yogRS@ zOf>LVp*y0Ml;UCv?Pgava=FYNz#pK-p!-v~My*)5KeO4`2Ji>>f#2BWjb3+jV_E#M z!lCelL_}bK1w>Ebm-HF83_c$$cDuD6Kx01QMUIC=5x{LQ3O(JVjmue6UlhU{Tv}V2 zYiF*P6ye~zYuEVHTh#IP3TZq9QcJ~1mhk^GGf^aO;)x2Y!3z$wE7|cYo(xWfg~Z?- zgK8;t9TbfzucEu#eS7muE0kh81Jl(Af6! 
z{1q0n`m}-^Y35h;qc7GnZ(AadSHPgr`%SiM*a{szjLeA?WGvb~>`o6NU#_?5mg9d3 zdN)!4OziJB0ud4k)LYzZkm*2_bMhJG+P{(W#G-}DR4e4Gba~(B)vWuy@%ldOqMs}` z$b~276M>1^lheHGPJw>GTpyTYM55#L5M|uYt=LJ9^;X*J$WanS8qx3qQl{SHf;jq| zU3}WhtFbJHqesfC$)>EJ7_1#Av&fxa$xRC1Fl+?+r{V-A`Om2Vso_pfoKd>GbSbg* zb!fu8oZ)1qXlZ3IAg@}NQZVvkB(hBgC??ajam{R;^#&y7PqU)ih2sZcq|8(GlfmI@ zS?1clg(YhZd~*{oI&glw%;5>khcuOTIvE?6!0=~F59=qBj7Qrv;+#PnNu(Iten6En z7SwyeLMOc=f*6nbLbu%tc%+Jo@)yc{&kYg_hC#jP`b?iK^PMS`$Ajtid>vofoFT32 zV6alBnE_C_*O^a(!C|ooRA~N^&l7`Vu~`E7Jl*!@ibmQW&6EUpFr0Lk`R1xC>Qi`y;|IA%f7Rst&p`(jk)bh+ez@bnm5Nd2^uiODdu^ zzKbSbT_%U*P58$j;b3G@EwcAjjhy=i!!{Fi1uKD86 z0%Vd})6p)#Tddl65IG8)9Uho(EY=@`t71&X#l)-qN^V$C?;qz-jBex<#d^u!;F zp#rstt@FvQ+b+`4#5A~az!~`naKi;VJ1`AJhG51xXjkmHKw}hsuFGr^*k;ta3%?(s z64D=E*pDZY#}na~Co(zoFDPdC1;%tU1VmXC?MdZ;GQhr7^hv#;`?VXJ(b<|;+2#%w z5biD}ZkeD>X!{;DUl;#Tk%fI_V83m@IL$Re-}7ZYrk}SsIH)0AMplwb{W^ z4ft#t98MJ!Yc*6#-Q}A9b~#p!w^Vq)K7$aRc0d#%z4_+WS`k+5qaQ~PDY7U($hqNFE(d$xYj>;&pR5F0aEap$?UV^aPYrEA6oO6NCERyW z0`6QP-KC0t6g}m8E((F*6O8pT{!V@6IQCx2%0SfqFk{N1IRQ0};Dx90^qan5iqhyh z2&w4)F;eAybt=0z57;U{Nx8Zs%Q=r=kV8zi>v)1J<4|m{+rEa;4-qt3qViDbtvUZt ztHL99B!UXqkOF>17?fHd43(a}OMv79_^RD?7SqZ<2EP#sUjTk`Wtu1-=L+Q;tJ#7}^q%?9 zwz#)mU!EGhAAUai#(;}43K^wgT4~*T)mtwbu4y60fBNjk*@PI{v-_>QCL_hTE{Mx_ z3??yNNowovrG2|^6&o6BZeA;VmQFU`yy_`?>L_y3fK5^J#Nh>h?CapVw6r z$=`3eXU*@Dn8so^&*QAM_+{3Badr|oXwjH+$)iLTkIAi#oSqi2Y4oo)S!cD?~|v z-J|pG8bo^Ix<)6;BLapCw8_deb6lBfyYWoB$_9?;OmB7%qE_z}pJ8}#zyo4|(G@-G zn=jH%06rdvcV;)7>UqDEIu8h7c$0zMPSa=PN+OGqwB@QZuz<~Kb%mspE%W%_xKGvR z#4$E-n09e=%JKT~m?K=h&I9Q6t%DFX-^!DA6A%5SpWvpe*646gP@FR}7KQQp;sK~6as9LN+h z`!LYZ3Pb@yCcP1;r6IjvpAhhPFmSn?JAfmj#`DHfyTyI7WtQ(f(b$>#A+7G@4fhyy z(}}&kR(;w-5{Q~64`sv_t-x%{ytEmn9#e`$S zyA)c;{Ui3y1NJ!x7K3X z)H<*V+kFxwqKMi-;y@JjbF#_B%KPPJ-h83jphXO+TE7!4DPAtKu(?T0HjkC_*3Z{;j&((h*NfmLBwu`aiz_beOsF^FG z(q?&Uu43J>9dLH;mx@B^VEZBTXF}qnG8!E}*}5S)t?49fiSB18kkvyj4~ebUv513_ z#t771;tpDX$Z$wU&g}bmY|X%7Ao1^I`CpNM9hYvp z#roMWb1``#Q2y(Zw=Lv7oG)RnC4+%=M_Oz$jl|p>sV>Kfc(8)Y@=fa_QPlMUg8J}$ 
zr~oSo6);jlO5F^tUq=VV(v~?-bNjL z7WMRNN}i4bu?&IBPp8RqYkFsE=u~>a+ZZ>b{3Ny^r0qfPt_FDFoclBqim?;lN7urJ< z$cl&+tf%K5Z$Pi>tgf9j48qigK4=Sy|(jn?ti z-Mn#Mxh~K{g5WwE+I=Hq_q}nrkx{c0E;t!+So5f(einy|V2T1SMJk_m2xbxnXao?+ z0DhroZu-q|Hz(=-ctcO!)1LB`@0y)xOk@KneZ@xSlW1nQly17at`@++&S1}a5RyX$ z*eE6m7_b-vgWOXYjY3w#v5sd-0zGezHnaR*C2-t5;0ux+znG4^dUyMZZSmsMvc%t=gS$fBKnw>A3$+|=^$2mSnjMOgCV zD;p6-hRRx1aLCVYu$rZCERa!R=S@@NQFT5qU#cdwztV8_$&7&V_ea8}=bKh!%1kUt zN#_VPUpTW`AeRPL+nfapKGc!9;_~>Dk4z52!{I#6i{(?a&@NZfXt4bug`c;uksq@^ z_M`i!$*?jOx+sj&Zw@P-V}-tG>`=hczl6xcSI^m*1&>$i8T)VUc)=1n1YkM>{gdd# z;6huC_$~-)w%Q&1UdegA8Kx9$br`ys*D!ysM+0S6nHQroFp${u-jvg|;AqhsdScW4 zdH_Ivbo~s&t~oR6m|S1xzvt$sb5w{N-qhD#Lg!ful_*^eKZN9cw#lPa&ZesfuI@dYp*KMunwM`eEp7qJwr%h%|4*{e^&%{ljxKz7tjY*zRF)B{^vH$x>*u+F*% zhv|jhr_%)R(6pCDSi2Yx6aeoHan_k4`jWyTF-;3Mbt4h_1~(EowY@HWSD;gKyh3+X z#2X5Zz%1hszf2;V`wTpEK-mSQzmVXcPDuQ0$K9d(`|VhjCNwK4SbxKgoM6r#2GJq6 zb)TX=$`ZYHzIP{l(MYtC$rjrcd!DL5;`g`y>XfaVsey_4WE!Mr$Db}M>!>LN5kb%c zsg$04qL;hYF|R2veM7IcR~n`zBO-*(Q*W7vgDE~YG{>LL#5$B5?SpK{!)8`lWJ7+v z&a3ve+9aV62a13e`!X^Knk!*I9Z}$Be2KKsx9kA(io?+Pua%wPzt3+_eY-x(_HU*u z=+G&|^hVt!_ORN_^3i;SQ-5e8AAYZV5%$!{-?=BmF8%TbbsSRxgl$*V+zLin|ADhz zi+4L>6}DO;abDg*f`Mgo6OXX7`$ZB!0eV`92$u+SGhe-0(`cqb*O~)fA|fKP-r=tb zpa>A!fD`xKa-Bs27VgRQ;j~nSdJKHB`uidf0D2nBkuv|4Ku4h4Gdw6M>ok{x3~tu= zv;k15Ui4?PxNC5wKt`k{x?)I#?E1b0!IJ(PU_*`8R#`!*we!=zxF65wL;r%2{fqw-3!K1x2LaHPTy&)avw5`)Zg6qfS|=|$&H3g6&=kN2 zzmp053vs0G!}nW>D9Y;bhST;p6d%O>gy@8-!1_8WXUUPgN(AHq$T-%>KxW2euV zZ-#^3;9j!Qvho2a;NE;p-2Bl>iA8DnlFIy)P~}}W=sJ01{5cVa+^I?98gl&aB&VAh{1i$W3LO?fCIAWg3x5cu`$kr zy4(b0%DI=!t)0N{_!~g+c)Li4-{3U-*Mg(x=VW}%n9bwRJfL?hm9OqMrXgLtZTozd zIyq_n__KDtKnYB=p(x$reuo&Yv*M$J{jy(s?3VFq&98&zQu;MmUlfbfA(QPZ#=TRp z;7KGHq`B{Z9h^D~{-@vN))mT)|EE3!$L-zNNiQUL&CE8VGd@uI;uL?q)M{kPlD;@v z&>*n-q=ShSy!ylb?Si`{F;D>vmtv;gEHRKPF^{aU8ptd-OcHyE-f>=0iSrKg#oxRb ztB=?jy>)hi!}lvUa8DP&sYav#Fu(3=TBS+*S2cIr4X2vO8=F);bbjNL?|1+i(}RT| z``)`Z`G*lgoskrPN4!5&A4c@iNk#4b-AVfqS1rq2?@4o`TsX?yFvg0XyIy!AK+ztc 
zFCnOysi**jDOTfQ3fX7Rhn$sbM|B^X2UDV0Rs~|Sn+8&bYZn~F1_cN><~F#VOTRZ1 z3a_zKv2zn%1y^kgo<(lAm2u6qS6<9^x!4un7X_m}awz_`v1i%<0;rn*eWopKaLt!l z7>IsUkuiTx8!q1PnvoZ#?Lz~(?Hk6bG90hrhJKdt45$Df4uk#`yu&ADT+Tn>NY0efEa$ZNaE9UUOB1YY_C$OJx!qXJ|xdGlubH7Y2qGUp>p%k`AG%D$V;@t?Q_KVMl`kAt*oPw96_mgn!MleUn;m@xS z1pe8HvQ#?R%vt+FO9G<#S919|Jk7{hF%{9<>~e{D#ve8gLU|h&ML8GMT^Q}Q<$%@F z8JQ1X;me2ye^J z0FtrI7LW)&Bhalh>AzY5U3c&pTbIpS2pawt?JwFA9}KNBlM+i0J)KXfa*6ASELY4| zO104p?Do3mcbPX<$5}uX1)Hl&SZie^Ii9F+FOlZzf9m;V+wi{@y9$P_y{?}}B@y*O z6WfHNU_nTID}?!@k!lysXwB44Zz`epPI-L++!y}gU_cc%7LQkvC|DLzbg1gdxA(mX zmx1dv(++gxl!M!-W6OI=^UAhFgibI&`@CzXdZ2&M2qGfOUgw#+yfCEwyg5;AzADdG z{-SpMeLC#%1%2gc3eQX$y(|qGcm(4mNiTf6-1qm^E*X^2(Lc(Mhy)Vw&l!XJ>?XI)QSsX9z0U+Rx&@V; z6CINQ^8-!_ff;X7Rit6#szc9f2{rP`Fg{0&yF!_sS-ZBL%dM&{{kC$BXJs**kpw)i ztNdR7BIZr!Htoj8rK?7zT=R^9!5i-RY9(~|JXMIi2}B)tMo%FmL+Olza!d zj011)An^;oAfh6Cn?N>VO^iC3+fL9HxPpRvqaDIo6jsczo@)Q4n_l1MzSEYwuOH5M z`bWsD!2sf_oz91j+{VRR=(F)OpaLZ1Fy%yt9P)YY%co5!77;_j$=!)# z@puHTPqiA`eW-0iU^^c^%{t`o9CO?sC2+ht;;m-(Y^G#&T*ljN{bASJ8cV!%cfYQF zn0~7<^`oFdfD`H4!+NHm+Cm}+VS9M-g)bQm8$SQkAh;KwcxFc!Sw>-G$NG%5fOjd<_yaG8_X7jq_7n*y@Wa0$ z@qiTuh>7#}R1BLrT(7s+xO8F!!C6G_(v}%o{9>=+x_~%=_x>o(PZ4AEi)!2Yj$lRe zkvlgASMd6D8TeiZW|7;Jx>JWwiSu^_#WiB!5d={6|6n|zmpQN+gLz}BY+aULRaR+t zTy`!TN5lbu^NF-d6GySC_r_=BmSLvwv)Y!Hl&sdw(ZhWgqe*5eVxu0clh-MQ=1<~U z*BXnE+iX$YSx;|0ox_k0e^CK&Led;bnU6TDpbQ?;!i3xl~Jyoumm1|9Xf2?!Cd&w>Ufhf5X!5+khg zqFXm77LlCq4jrKX`R|fTQ3XQeFFO+=mhy|xMjz8eNGC+BD%b@WYJKuq-F~c(c;rjC z@ga>z<`+Y{nh7hpckE1|07e2h!rSu*`}MV9UhZU*UrvRk$ODG-iRs$1(8&NO`R-9Q zW9$=E&oK1!smnH_#TR-0Tvsg+L&8{8g3flmDZ(hUej}DY0BI%E;;Y!h5UA$ljoUsjO0ReJ1xOJG+%w$mBe(78|}X^sRR}#E}MXWfuSC-bNCI_jNrWIsg;`5MiR7DX-1U1-Mg3?@V@9I`}t^(7` z{fywTlppAq&H^~|{ZzpMStY}rV)2GT@XvG(y7&=3{xRWcvdi;JJ9i3|g4CCh^z**- z4>4<>tAbc_qY;I9(`$cgS&Il@eD`>Iyj5J@WST2rtLwSV%5ZFr*q`j@-Fp$ReUT}ti7IJrG;t_`5xR01`cLPRxx4gR=%i!X_iC=-i((6*dL6Tszga9Vf*|1k@*91E0Ti<)z!| z;HayuHXWNnp<-|KFhnHE_%IXReOqy7+z|!D_E3QGOx}g2FUTj!RQVl65HIVkU~rfg 
zDd(+M4F@h67~uUwaEB+11vo+XWV}Cr-9$QN4Im|cs$%^Ty7!Sv{}@jvXu&Ja0F7dv z*?i%lvCuTrPFX{EkPB<=RjRuzniXW88;SPvh7Cx0ZioLn$-vD8#KG9NdJ{dE*MWuf z@2yKk^1pXQqH@54KdyMH`ad6qKU*3Z@LB8As{|C*;e;7uj5^uu^Z`Rhv1{ z1C#S9BSbOtX*e3!xb_lxwPRty0EK;d+WIZ`{oF0?kR@ZR)ZnHTD+DwY#5Ss#%sF#D%+Y+0!ft+5AtL>SWCz^h-Ktue*mrrC%+ECK-68*l z*SITosSn5J$bC|IiU$gAEu6PsTG$_2Q=uXaa5SQbqa5-XBS3;-v7vY8TVuOU>aDNj zGv``3*gAJPpcpK;nEX zS);tPx&u;Jt%oZ%pF{l{oB!S#(870V;BJIqq)Ff8{k!5ET+hJg%0H-9v0(xT`&=<6 zjdH6urno9iPfy`_&KM-tAD=vK)UhxWtUIH+;2SCyw5w75? zT;6ii)LWN$9L^fE=kmBeCgaw#H515)9ywn(p^R`WqKIQ`cz!9snR#E{_9|=C8p`Ak z#!ad>8;$50;d*HLShE5)ng)PE+H!rSE3i4P(dQL3SMdT}0huVenXD04X2W&wGV`s^ z+QP-FNeNB)o{#=f_7T}D)|$z0>C;}~+=0-Cu%X>p>hCNkqXh=tN?1ZyfI`VjZ=wNp z{?4N4yjjOm0{EI3P4gs;z@1YP6a)i;i5vUjKa}L}xcg57VCC!(aAALf%m1GbHmb8ixWP6N<=| zVg`a%0ajbnx8s|?>owvCB?9%hb$}pKRL{3rT-P~69;Lks zCzN@A`!_-=B!mYDxg0QOs{>qV%xWz-;<}Bs3pswXYp#ot?&&lZ&tdCVIkbya{1qf}o%{?g< z*Uibd7`To+9hhX;DWkX*$+6+aB5;Act=fQER@54UCTdAP#|sD&1e0u&kkQv}gX_Vl z*hw>wc}&{_bNYRAd}I1|mf=NOrzMC%FFId7=%lp~eUrRtU2L$6w_Vtt&)fr$G%dgkZqznjzC6{+PD;qecNxDx|EajHw2b}h>C^yr_t}V zuERo;vlNYRPz;z@+*TP-SWEEDdAaD7)5``-Q5fTpVuDm1r6f_2hPjFCP9CgiMljAe z=d-mf^|r5omPP(Ol^gw!{zaGJ8Vg0c5p$JKyp}Hrdb5Kuu}3BX51$S%BqLYMuY@I^ zIm$k|o@i9`NX~pD|1=6LKu%7WuFcO-12UyI`oC}g^)Jp$nq=UcSFHG(X4L*(!>mi0 zj@Z$V);LN62JplA1ZYFrUiZ~?pE0i~Ds$v@rcXvKwy{kYn0^f>f7^U?#fq3=jIMX$ zC<@U!yjd3%L`4R(UO!Wc1HmtA0c4UfpvXJ)EO62}4KIss4{jfPbi+J`yA-MoD+UP9 zcdbpYn*NUQcrb73^SQBdnl7{)thZ3KyD24~;rEJx1m5Yp0(Ar4DF7EsgyM3#f+fNi zNqcL$`$O?ah_@EmlLY(}j9xLG*UIn$u-ih$vUUZo=zr!ajEMml4mEn6m9R zL};zWo3&I}nYOu+ps`>>`Us&9$i{3im|B#&kNc5xGYl^(aQ(G_CBN(&ZY=L75FKot zl^HT6A6R7mupE6=5A=E=HRLSnFY-#Z;joC^^PlJzjNE%ba{8BPL#2@Qe3||X%tq<1 z?aox$3L?(zMJ7!)_0ZjBc6zFT4<{-B-0COU>*&$WwEzdu24M$ek4sqiQ2I{#ZeRi~ zn%#DK6`uGg7p8ofEt$6lIko1D2kgi6)?j`}w$9h}!i_ylOjKFGA4=d(?ER=04GY%D z)*jyp)rl?YTq0;WQ*(5!^BB!9T+?08?fVg>%s znsbA}8+)SFp6XT^&sKP68Qd1Hbp%jCL;qB!NQOvgpOj03bmUA#=I@^6MbR;K`2i^= zAjW8nU7GLU8M(-V!$#zFhN_%>R1Q!4O>bqct15h%)+Aw3%B@l1Y$Oug4aGj+E4}J` 
zpv2b91dNPal4Z<0*UD}SnKNM-nQu3HGb+5M8?!+~88z_4A{Nkv@0A3AsdrZr9<~F< z1mUCbxP+iIBn3tyO4GP4>UYSo`;%N`Y6voaviw%h;XpX^{%q+B)9q-n$k1?}t{7l} zd*-N@ zYU>?of&QPGr*x$qY3FYg>wv>kgme&|6WW_gg2PhB@fA}Jp`0z-XSeXu8 zPDP>`=Qw3J2-p;7AKhD$vOgDPsH#()`ZUQK}-~%yMxM_aNRAmF5{M$s>j+ zq1F2HU@Q*>&}(>CY|oI7Z*FU_wP@twNTl^qr)I?QU7Sv0HWwyWD5UY-eN6VpSN_-kVsIFJ*xSeP#CC#&NZP`w zlqlkX6No#TCR1e2`JLt-QAtiAdNKAjS>n1QX3Ejq`MoHZs|}! z0qL$YH~N0xiE+lg_n$ikW1wu9Yp%6sJo9;eK~imj@q4&Dj<3H30E&n5eJHIY9aOS- zgg`p^omPOPo2ThChzVR)#|!cGt8K85ly8vsPW?~?La$%9mQhvJs)UtFkis>I#65>P zn*TC`x?LWNGN@9DxYKGrMhSh5v9nasrW3*|1yADAjA2 z_J@VUlW4_nE<3s+iZ1l@&(`E~_pdl&zpl9UM@ygI%Uegi~hRxOUD z$OW2-xY97?QYxqRud&toDSwu?Ms7BXMLeR>;n;qa#~c4cQ@ocu@X;w5@r} zH_aA2bh^hIf`mJB))^S?-#Jr=-|=FmbEBpUe?Pt>??Ic^%-t1Nx#P4v6_7oyprTJ~OX!1Od?jzzG@$%+P-_3%kU% zETtNaL5tevBM^33eOP9K@j49_ppC^F62(GXaG5;07d4t!>^Jh@dWqkc02<=Kmj$(b zz$e+?nB{xJzshp%h7kMnqN?r(f3R*+YF3y#kOKCzcYH-T^-cc0QH9mRC3Igkl{r$m z+|dSABDUMbYg~3g@e`tq`jI=ZWx3?~V}>^4gwW1*MHjHYi9313&B|y5D4Veu{%}1w z>=3s8#D_vJ;Ed`3J*EE@{M|J?x67XknL|&XU#^Bi4sABVve&n0^|t-2;cEPmq4liPx`1Ic~#Q@PE2jjZF*p%hvZT{s5wmOmX)+gR5I zBc3Z(nk}cdiOK6qean#$=7nKiKB}C54}|(&AOoa}t+Mfo^b8E8!$01JONImTaV&n2 z_u2E64>`(R1>VlNP6iVLW^W!#>@9R`jqU6d8EhKWuot=;WJD=X7H26d94PW`wM=yW zB&Y3t1d-}9C7E)%jie+0&=;;+MWZVG>slW*6)>BmVDufs9&Ykx@4@Z-_G1J+u z{<~lEbUtO<$7usnmwsdkj0GA(sS8y9)qUl}-Ncj3pOPEa!gY14L>vMbe+AOdeT$W| zWtAK@JuD{m2R?@i00AE&?E8|)^nvIl3Natn`ISqGd?leIGem$R8kZ^it>6#VBOKUd zuHVnvVh5)k#jKve4Yl<~b_8glZ-x(d{fC3*>eCTYI!)f3B_~nCkxpWpbh^Yh?+p9s zFMTlT62DpEyo7ZRl+CFN=>L(%WWcs_s#%Ukh=7E_K14XO=Cdu2hW@kuIbz={WB}^U z_C*@V68LlWW5gvxC_)P2amQPe-nKB<*Y+0IJO6gcv;M<5Jd`ODOKYOaIk#ox6P*3O~4q)sI7GjsbHT_N}>=_ zK5;C4fsY_j5%S06ezAL<4+J#*d$$EVGNjQas+eHJ=2DS7EeYK27;AWulPpTlj<;->R_qt`@`z;Vj;C%T_+1x(82mXFhFb1UkS z8C2|z$sN+Y7?DcV@?7)8^Ilh;7$~hTuPq<511we^*P;&J4YAhOkkjv{8yx%kgZ)2w z;0n`N!J-x%?_uf2QpYmZ52#JVPk+r573&VAqJ1SIA>n)g&eWzpu0O=ET4+G+3wCXS zf7b>g34|?d`sT6eu09rFWPC40{9jgDg>*$Y2TzZKWLv30>o1~^HOnK;5WJK%>+;in 
z6Btz^d@bC|QB4f<*^onQ+vUYWkiKYifA@zO!i=YYi_nSGq(=kG?nuTaYln2DmUG!E zOQ8}yvAL>#`v{J~dMG-(g6R?&mx+~+4VTHy;br`hc{+SZvWK}Ugj^4&3?Mn8J#aT% zR36ABKEQvJE_?kgbLO-)v@e9$XYbc|FZYLJYLrmKOB2a^rX8dgfUekj<4%|$%t~o+ zg^J&AOABMf6%5un-u0rs0ush?LrMsWS)|nK!o)TLb|DF*;Lso9h-}&Y5wn3D zt&-tdf}0CzX9-er^(I2L@feSM4wW;P7C)Dr7$;Ymv`c8+I~W~^ z0rgnOKNL0eM{4q2t3;9220B0sGD1uUUAn+9t0Sr8B)4>VzE^+-N!0GObQN5WCmwCs zS<@(8BVgL0n#SjI|8AL`1`6d?fJpqiNg)*kIKC1%#(z^AGW#g;C@9-fEKg~>xKk@y z2V!_m+oHCG+1)M08RLct-?Ivuc~kJX@N{?9;4szz#T#BaUC*Ekvew4di}@5<^{wy` zm~=cwz{wjJ0h!fvbQcZczZsIBbmx*-0I1hUC5fLkXNKr*+7alwG0;O-;QlR)D*`=2 zA-VN!-}&+x2@|#cdCq<0K6<8IAfSeO4?koJvahOE;2CbZk zX^5o8bGF*Cg)}(EG>NSwou;Z+-hX-{M4si#f{fL_8^aNf;_oi^yUGkK;aiTR`)ak$ zaj5S*_$6T|n&2srffihiPgakHJbtF=V#ytHuWUjjqq&0yx%Kv63!he~$GUV&U``kf ze4;{ksZl^rNSExs@}R;Ry~4YU`BPWTIE9e$qXC!^3m`{G|Kb}3Jc!jFUs7fpJs5eN zW?uq^4HJj!#vsYf(hFqlr|#R(lg!{gKq1@t`0 zAmVwA0%I>ZIW08cY85FqxM#o zaWuByg*|cMY#EHgkwY1NcQRe&A1*8sJyk zj?_I-54zgr3po95d%gvI#ctJyp>Ti)Zm*PY2w)Fg(tjz>bEzq4thubXth!W1XYV`v z4n*QGsA}e)_K@ZPdz4)jH%`wRbti#pL_NClj90vr$pXFKI<}U-VY9)UMc7>xu(khO z3Lf%~y=E8)a3XxGMZ}7@ZEtsC!7JUO*P%VzW|QzywdbA1Kp8l4KO z7m_|FjKkHA*;9oSGOHtkGxNmvmi;FPGB9AERel()6Q5C)$*3(r0LG(Q)+4i8YVq|( zM4M<&$C|FNkaF@~C41rsLw0Ujwb*a$f66NO3<|tH&V!CVezOw;3v3?o57eN z*gL9E57twBHD`^TgU3X;`+mh&GPzNHDdP%zZyf-oOXn+4M8Xtv4;f+A7#b}h%wO}4 z^8J&nffpC$JOR2oQm$W|X3c)cNb@;F%dt49<-$(hM1;dVmo_=(x)-)%^`15Xda_bs ziliC9+_N@3(!Sgt6#gQ&_mHR^Qa4qPAA_vf3Va zPf)h&*m+{i*_ux~Q%fZV(#=t8u%nWfykOb;)uM=B;fqG|&T0u9L+4TWXy=E0`67$R zvemr;d4bj-f!mK-r0}tJD5W1)gNL|(oU2NCFJ?P^&eML18Mri6v9@c>Z*poP?d9}p z>*?hYN66pJzCfm%=-_$Spii}iA#iJx3Nj&X9o3fVWaF8<9HrOvZrv)E2DNh#VD$jF zD39`(!$eVnR7e-39870pf-hXXaceN*EkKfM;l7)%cUkWYL*7smi0nN}V>9ckwCbO< zv%D51ofveOEY1tU)`G2k#+0&R($BQ%6U~GY-$dzJ_B7!ZH&=Xiud>gYfCBR!ALerg z;pr=mXImMRK_)lKQuX68UVV6UHu0n=-Gj9_*`xv-`Y$XgMBiPZUSD0&1-?3t5xY(0 zJrd?pci* z_w3WJZtp67mSDU3X$LPX;D;3ZCpC$>iH5rSUOfu0FMsuLDjT5G-{sX#&zHA8-{=Zx zPb3{jw)ZIuiPW{`X7SPJ{^->71dq75WG 
zIq-}E35&dVyeE-dAYVGRT0JW~D#~iE#vXW`D=ANg*RsoiisWLGmyV|#XyfM?R8FyA zA*Lw|s9&#l6v6|{Bs3Obl>cC*VQ77SacysZc%aLtG);{2c#^`U&E!QNo>6z#2r&;% zI0-ZBkykiA_)wss{ti(wd4B$$Ae-b*nzjfLsN7gRaa1fI&jB?q z#l06ikg`0QC|1=f?wNgQElig!+CsmH0AkS=N?lfpsP6vy)xO2;Xg^}>Tf%W;p4J3&aOk(Y4-zmXxG?fP^A@1ALPsv3C^ieTn zDbu|PayS$_B~Q0!PRJOe#JShy0XZ}gP)(7a8r4ZpebUhYG-aMpBC+lfEDAwL``vBx zWl+)UF?+Xg7U+8iv&cD)i6RyT=0Sm~dBY-kr4^VIEZ%$e2-L^8-W48EyWh)0f z8Z0$KE#tdFhd&p>JB@4+lO^GkLA`u|Xwk~-lvO&&2*QxO7hy-4iJh4Uu?9fzJcWP_ zq5WlFJCAO}!P4-BN}m3OHMQUGu3XorDf|lahD2CRg%fEYA*v#`m4*Fi*ZY&K?(l{^MA#NOMjMci2O$@WZ%b!_tM*ErmEqYs?t z>vZt>IoB)&#-JjA4}}vzMJS_Hkku4~1`NIWffgi=Hw%Um7Hem`GuM8%m{{&yse=zN ze*hxYF(BWmPF^TrfQE1NjH6_%Nmu~rqFvoO1JM#0Wi5DYpZDUcF1w$Z-42J=T-aZ1 z8fP^Gbp?1XeH%8(+gbo7Q;rS;i^@cd&MGO~oa0GF?^*Q)ZM(QybGjV6O=W}E>^1F>CspRj4S+QoT#QC4jAIZjFX^CB3JZ)_!zG{XnNAq# z-dqCVJzz}<=_)pA^tsCaWc1z7rsOfsrg}}O(J!iLDw%!)nYX3};P{Z(#ATD1F)Be` z;r)`-BAzb67j`$mblDNYt;W^OF%wMJtB|ssVeZhQy(8GJ6oW52)|4!<-$4f`C|-%jM?b|zfvyJRIL)Jr-?KfaE$a8C5s|o|Fx*zmTT;d8(b3)c2Rj+e zuy`DY4DcL3cYQ}e1305QwT3}?)NOgDM{U=KN(3qbe#gaNn9JM84}AHvScLn^?*z`U zmh-+Yq{*LbnUI86cYE#!Ba7%1=}BDt`e`ML=rIigMX!G7daqh}z`Zre`MZ0PdwK+@ z+J+>ha=)LT-aQNQDXkYN0=Acid_Xr)QP?z1wbs zEb8OOmw($;VeE1Xe~wGZe|kOE zr1Bt$lq@_YiUE+9C7oc$a(0WFNLdNXXb3d&*_UL7^R+m(R5zPTz}f!jS0 zQ2lVm%2iqbb!%MdzPN3K24pB#ukj(wX?wGo<6T|7} z@2L_+CRfig)%Sv{CFO0BKoldj@=y5H>F+5e88iLhHGo$*1<6v(&yBec9!MlANK0s= zu)d^Q;d$N$tA>wze-h0seVy_RAs2~=`gd^&Z^udyF07VFuNahne)kXe@h-RYvljLciz~k+K zQAT(mSy!k`UzieEDxw1Q)XBWTmHkW!1SaqLD!<9Kv=J{+%6%s9EKo^NcL@^tL6$T! 
z`R6$Vq&83#B!WNrvrRLbDt|Nl%OB#1cyiAlio4P(a1BmG=mSMtHA~_MRILssRTep9 zC14qX(1{o=et@c9ppwKHJQEPWz7GF`^tDn<>a$k`6@T)>A~%smUph7+REcTT=q@3`{!YHu1vl*59Lr_%I)~jS7?Ad}M{?qwdoK=MaC^9f=&BKy_a~ z>lVl%-aqB%%&gUC(FFx#Luy3Kd*P6-t^F}kF2yuDehl8t-@i`O*jhL?Ft2ubGQPuw`8mEmv#`5rz`LF@C`4y3R83@Zrn$U}K9e zfUW1!@F!rIyLCHyWp``j0ppjIC;Ae6N49x&W3w!&-|&^wTP13twK~7A^b_EcOkX<& zP+6H$s!|%dD}Etf#Vu*I*GUIBSq58fH~l#RU{7+G)55 zfXKKSP@Xgm^MjTH)Wy)jwyg`D@*a^{%zqi$ zFk8POJ~}KFuEGPt|F>$n#JkS+grG2oKWri8)+OK!Ql82G^7_;bIQP*vkSvYS^VsVY zF>VNa2o8#J)nb_+wP_7w6!uojRze46*x0H(ht_;7C2W=NuB%&*a{Iy+iK{IiV7*@k zw!4 zP?S35Z8~yN3gEYb@Hz89$aE1rx<0H7_R>YSyqLe9(WH{4?~T?s$b_xC@(TZwSJVak zP5cti=<9Z|-5s{iwOa0^TgCDycvR{T=loyHu=Q7;NDqh%9_&WI?HbFw31MIuC=(BY$ zBB4as;vLwCc|0=|C)40U@RtbGKpeYYR+Dxna!vV-_@J+hRAtkp%b#2$Noh1X#wPxQ zy&vU?Q(B1cKq_{h>K|PJM-$^-`Z)L3(^T*q=~{&X%#486;NP@<>*~cs`wiLBJ_xhW zA-yMZqLvAa4>cl7uuOCX|8Jl2^IvYy;5J>n00;>C*evxdeOW4yo4dSM2JT&75l0ER zbn%zHOUT)7=aJ{LMF4p``!vjl?@gsg3m(mSKa0CiLX^NkPy-lCAUF+%My%T&rRy&| z7;eCI-@oXuD5fu9I^1hwQp#B(D*7)PI-qP_(}wofcGR_RuFplJY_Ji4F)K{+KW{yf z4J~!#c=>BmixWkZMLgQC`Re-7eTVnlc7)mpM#1k8fZ5kVi~X%8xOivEKNeT$Ys+)P z*qMn-muu;}vgKcWThWD}FdCbt3k0yzkW3VddJiuP-GZia9=RP-pCryPfEu~V|2_?4 z6M8k|9F1s|%$J~?F;xPqXxvc=qZqf?#m+ULSy>LRm{U_fSoP%&&`{{cSK)s4EF-O! z-C{0{)wrs5R6X}|BwLACB*WBlXGHd{tIL`9Q5u-}C`$a=x0@$*qf= zX5e8drhM5wA(_e_(?$dJMh&I*!T+BDkyW5NX(z2}GlC>~M-D{o29gowQ;_7-um*CL zYl8O8&2Ri>WHbjqnv5Rzr#}BsWv_ppA9-5H4Jn7NDwxDSn~~A2<0W;k1;YN7k&;t9 z+}E>I=wj#{N!bz&t@B5bDum!Qk<oil& zn3NiOm97vFhTaVVeI><5No$|vW8a9!CY_BU?!$DAK_mfF%->=Y}Vr%5RjAdke9o3U7KTx{?? 
zi}ZD&ddc%hd7YD%(WVopKieP6ZWbq9l!D)T@OqWroIxFfiTXlDXeZDWH1p!_zM^Py+DPUp5sc4af<8w@- z?8%BFH%vJt#Ffhia|YG?a{t_Jm&wWfbQmU21qXzI67B2gFlYcZ{riDp`K(4aB-qXg zczxlN=sI<$J%&!Att7~7R6S)^^@IM+*g)m1g@zbt;OBZxDR(crOEWKW)~da_^E#1x z<7Um_+)ck6Q{a;>9%FTTTO@gz>V8ZiBU>s<7k6|8kYMpQuxX3S^as%LsOc$UQk4W_>9jJaw^g_mLjqTnJZ-~sJe(c3#rXUDo4iksITJ^X}cHi2D5 zz@y}vQ(v;8Ph4%a%AQKK`HT+mr2-a)ddfcv znpq!!=nC;6lsF6beai7WM+wgQ@%_OWQqfzQ2U>avR%bEJ4XdH@vDI)g&J)3EV-lCk zW|Z+zxw_TUU5IP0iHf`0q@6j+!*9pThKe7g1u{c7N)MW0VGuRbadZCtu#{Z0fOXSS zMzyUT2QE^hAbnfh&fe=`k^#}<3&BfhK+c0}c0PHaBDba;n^tK`q;es64mJJo;F_O9 zJ_b?_6xp2So;}X`K#hvdb#vDl_Bk#~-%46@tHsDrQAQQxob3L34Tv|NneL;x*FKi0 za7JD9q7yQb01A6;WLL}yjB`tK5**Xi=za$Qt7Uvu<)rJ`2j^$Ud@|41aG3?#C-!wI zFw5G}JL(LW(_`rykZloTq1tR#{)%$pumvCG=Fk7MuK8&VAMPOCOfE&9d9LJv?66&e z^LDTXEkk7w23HF&@xyL7Wqa%RIUwLzqbxys$UUWAQY6N!O^7_X>8cB)(R%b)T%3p= zKDmD@|1;4{cZyx8Ld3CXG$HaZs@yHF zQLtV;afqfucck!T*|d?SPd+r{>MUfwUIpvu=eLeGPdZMv#h=mR5LnMK)=EjNf-@J5 za>C7bY!;R95`e)ZKW;Am_kI*KW78t6ce8Lu0&~bP9|a_yT-{1cw#qYT;(cqn$vgM4 zqpsqcdHuQUmd`1M@f)Q4jK|4evVF=g4k$Y3QLh?5!o>x`k{^ek&9JT<+pqLtrpPM* z^a{tYtZe+vcVw2KSynJ2dUBU#{>HI*fGxKf9`CS(zdhEBXQ|xn@L_eA29aYeM`G45 zfbR3s7{$JHSLhWb@SZJFdyxJ62S_Bx7No}=ztepSeG?f9L^Jgzx?h#s^Jhqbex!?Y zXimq!yI@8ksYP3?CeOBm+6wvX2Z57mf0(WrVy9E z6afyIP{ur`9`{^I0^!sTJtPpF#?_SrA(06%q%K8iF59HwaSP&jVpFx_q-DXtGfw;~ zZjFPdHdt(M?;|3n0J_9%-+m@l^!b;@ul~dHt5L;;rWg2WKvksi38UNTIEQgV0N#v? 
zvGk9{OrP`pkPE$8FpUBho<1NTy`Z}<>O>U7{- zNlozDcPV=YF00cMI<Z3LRie=!iv zuYcay2Yu;$uc2yKSNObwoPtxk+(0?U>sWsbUW-qyYrn8{Y;%)$OJLg(BkPKy=ynV{ zxbzm6jPADM^J3m?D;EUWJ94#W*sHQcBfx4L4EDk$VW$y~QG^LF0VRxofK3#I&HwbaO8-Em74zwsZEOYQUXl9* zPHr>tRPsMgcpUkdE;e_3v|inM-{{=3L&$EjoPX|=HR8Dl+*q{|b>xfB(T&S`-5I#f zZce?D3;Z?6NqnEF9}6nRK$>|_B7Nn7qltAGHnEMg36r3eoONldPKIsVKVy3E=1bbn?a13+(ZHMjKH*=wTvce9UZi4sF&vu~#<`lX z@k^s1iAr&LLcQp1q=Q+3+!33*TeqZ_t zW}}gVPhBIG=@qZYW1bVaepxmP?FriS@lWKk$Hn0;vT6PO6JPVYYQzJ$OVBcgU0!ln z(51?|kwIt`h^MCunN0!ALPo@4x7ZV+guFj7oTL82!lZ@sk^m+T{sZ!Y zOK4yiSl9wT`B%qGquo?4o(KINyA$g%)R^z)?Byp+mzOl|^!VDbNXLKO<$NrQ3@~#n zzJ3>pL{%iS{)%gLqUqhf;=Jd;k_hHF2knFqIvU1c)ST8Qi=_+&{u(~=daOSWhqE|_ zILo0LDxkD_3 zuTUv}@O7My;?V0$ERm3P@7-LX6qZSN!Ax&-hNlMKt&&@^1kux)ej_`dbtTM*?V`>n z3FB+;HtdsHTUk|)_mrS_iy+3JHL7RvsqLRAUOb|R|78XrCF%FD{l*55S)n&NOFi0k z%hEAq?T2fho2(qPfora>sTI!>R(r%jAfovfV9($KCicOL?WwtfxIbr@A8i?CzN5>AcW2n;rJi` z=>KkHd$9LC_+T?cjKg{x0wk^tV<0DlIW{5(zykHy3&JR(d(!dBQ7&&p`R0;bAMH(O zwsfb<^}gp3_rUZVy>dKvc};wN&D3ixB8H2M-*7)2O>$k6CI48_{q7jCas0RnB_*jm z-DM1cLwsr3i)Z`<519xZ(u~h*NH)VI*n3*Tpvk1#@c!zS8kQCZnK8NG4AlGJx*=!& z%w-+8^y$~w;%2Zu|D+k0sxqCBf}F^i=}X&5sAO1-+j*KZ7xHG?#_H?B@D|6p6!*@x z;N#jrq=#khw816E17+yR#!aIGi^fEcmhF5BlOnrLyh0{MAu1Yc{knz_=;J2pwbH|^ z29Cs+@;f~Vtibz1mY$(#aZold$x)s4{AMMJTVx_V98D(HT3e=`dvUy@{6o&%3_cxW zmb%B@fiuc7Q7jLrSNRQm9pqZ!qDXkur3hB(pJO>T_ee67KT)*+{q?~mtM4tBktMmx zPtWm-8h7dX?8R90dbSmtA6{AQo^Z%#QC92AT>IPcti%EoMhT1t$* zkdlgiD$29b!SwoMSG*q)Dcocild|-)RNSpfmmcdm9@F1G^SXD-w+OA8JllFB5tEv3 zNc9BD|7Bkasg_M_-5yJ{SK;oRL@@_D&KtR{wQo43x?NK^Hpv-}KTxn@Q6o%naQG7!@sjk{2yw zHRj}-*|*Tabj+^8!+rZ!Jp`8B1|S~h>mVg1^PY{%$f;S4X7aA=qwe3E(&fK7@Mu`4e*?xW+Yb_is zo7aBJhK!LbpCk{8y^>$^#HQ{^Q8+DWTmEM_UccaI`9Ak1;KI9}%p)}zwxUf>vqb1R zFUhR#nf0yjRzH!jjKA#t$=E)oE=k0GUpD@xD6J1u*9yT^s3fEYtoN9s0{*Aru^isN!+7Z% zbPJdSQJOY}7sFq<-Yu&%P_+@J={}X{$>C!6QxpPd0Enzo`nb$GXYZF;K9#i}en?L? 
zm&Wmcz0yOAg6AYQ(yC0<$?R&ea~sN!4dCcF|8x;gJ-wFaHym<~c-aQfLO-w*n0+9R zWK(9Bq}US=?k1i+0?$M8>L(& zC||{q%wo*h5&U?8f+%thZA5u9;gE9tfyz^7-CYjoa)3icYMYy9#x{UW((LPgyC+O>HZ$f zVjF^~8r%`}TS@Ka35$(MnDmB!3>alJYeF^Oi8qq_l7RXhhxrgFR2hwO8_+|k>cZvV zl!4ei$g@)ZZ!6J>yfLY9L?OHLAv})CmJzBgg^hylmK}SlGH?uk<&hL)QnCWw`{KK- zl|1>m=_pDwF<>r0E2vv)B!8bK&G+1WHyO9)lD7UK_M2FHeTQ2-6McI9TvxN8zH0$k zPL$dQ>vBT&e#4NE=QZXLu7Le=4+_L8@+!Q@$IUEA_K_U~B!BpiKO~&tk&wgyb$RpU zw!rA`k2z79}OSo^GAx^+(`f!e(MVN-m=bT+;yyY;^Y`hAM~>IL)!Feoa$kSs#@2@R;t zp~m<${Ptgv24-z$^Z*gsSCC-hwcd7V%cT1E@&@4Y0&Z&vHY3>y&lj7Rarb|wnLQ2x za!Xqaja)GGW&q+Ku7AD=G>XPjOq#tdehehpjpaw+@zgp@NhXl;jMms|0ZA32IKro0 zAGJPM%9!B695iqc&cr1;i;)qLpwzr{%!F@`qOyD(w~UxX>bSouvqF0?VZvvCxDus@ zo{CPyY7S5!=O;5}Q|)BDPU;^GAq+Q{CsH3&-?Kj(z^Q%z(Fno_({Mce`Aw%MBJ|mG zslo4RrME1d4Q}hB4Q^Hhf#2(#$?~Dl6nbzb1Vf<#SyqUNh4>~{9yrQXR@L{lMC4gW zRUWP=mI_uW%9z{zO#=!8@E|*;N2+Xsym}FqCWx5Zu7UF=TA1&=^ z>B;!?th)_*em)fEhG`B)7(?j9KY)Sw#FHfBVq;@tyxA42oW660hlexaa{BC>Gdvs4 z7AeVp(%IR$HCJ1L$BK+isr}LH?9ZZB@8SPdGW&A4hZ+_hceVl!FdePsNW;j~Knbw> zg4tJtX-ot#oTugo*Dmchk$F;4p9CS(nd;HxLf#k?MJieVB6@QPFoZf~F%KQJn2hx^ zyHGGDT0RSCVEEnL1{${o7zB#R!9j`hDn%n}pDL5hS6RpTx_~>l|Jqqi0rGHou_?K^ z)ELC<55ODLmJIJb1fu6MptEo6EEZ9rkCFV({^-;`iEmQQ7A66b=z3k!WvBojt^#~Ql2x?F#L zC3q7l6Vi2wKm1}XUVPQ|JuXx78p!V7H}|=d5djSmP6h?~1VX(#U$0loD0X`e%}`ll z%2tz)ok?ZZ($?;I2VwIEh}kg+D=cD!ec_0SCaqaV}D0X z0odgE>AZ{1;uj$0SP2y733Mv6QM1QhgAQ8t#glk5I+SXCzyh`LD1g9{d*lFCMA%ym zf7Vxm_!VFQD(%K-bzC&-oD8(f3<=iz5~S?xm;hQiSF=W6uZ#Qhz<@l@*EEh8x`_rR z&GfIm2I~f|x<+^%Cglmf992)lG3LR_(SO?v2lA}eKr(%6@*}=t8f(IgsXrzdki5V> zAhNveQnAp!atmfwR0gCFL1VYQtC5*}E{SaI*LYxk(S$l{RDio+v{_0mh{lJVN&mKs z7YU#ZvT{t~>2G@l9|gYQ&KHPSX4c3#d3gE5_B8+`=!flVSEtU^dJF}RKnWxX zNV88CYrwm-p1j3lmgVkUoGwtnpDfW+gSqdfWGK(k)!jFXouVN0ul2vpmdrx|oXKv! 
z?&%}t^jzuK*O#yd0j6dIY#&J)2aw%NqCBBeo1qLQ!CdOB}rF-TDo0aGVn0u{nKB2M<;;mg1^208FuFsfF(%$kp1_0 zkL+A`IB4y^&tZRq@)?vJv1$2Y!N0-hbiAO$1r&e(9`-r-nUpIJt$F}XqsG6_E0a#y zX?Ah`Ui072LQ6pXFl=X`5#OZAQ={+aWa5AQu#lXi0gZw``qwuEJy82X7VzF727yRX z{I`!3_nij~{l9e1{o>G?r-mcp|BK%Q>pAQR{(pKV|K==0r`7+iL81Nkd(7Xl{|daH zzY7?szJN`hikg!X&vrPgvn}un2NyR4%o&&l{%n)i6PO4wom}DyfDwUwCJm*C-?LlD z|1*kW>1P)UyySkq)Di!R9aY2@?bC z@H!mJy>Ud9&hxybM)j%`cW<8>rMfKwPYJi*lgv|nOR|#l-5oQR5p+H+u!tc99$9<7 z;=Rue@r^lvTmEkF#jI2b#LQO8)}o{v;TYi=5f~A<>Hr8{R{%;pT57`{IRf#PQZaeI)+#!^RJ9!X%td}$8XQe^e+ z>h>9?DAh%CxoPXC#H7vierscV7=Wh@F(;=*tl;e}4Lzu`2RsnW1SnS_Buvt9(Eogi zFeIRbPXmzek%<&Smf%G2J4A_bew?NdzsdclVmv7i8t|u)Z1p=)Zx0F#xVt$!2XWfk z!fO!)PSu}nY5xq$Y$e`t%8Nz@d;{e2SB|(x=LwdPfqA&g!d+yHZQ0x(z1>{{m$YI!R_8c_1xNzY`oPSY`D0-) z$``O^P`%`v{9bJfHL8F9TlV5_^tzt#Mzxcs-Ox&GSyNG^K% zgq3MPBmHnDILvD7lH3Pde>x-gA}#x=zT=cx#Cf7!3`mwf53RqZb^@|aW$;~4rb`DQ zi+=9uQ}x^jVDBP4dU@a-l0164KLs9}RVYUtE~4@GD(g4tM+9>wlAd?5teLbfvsAa%n9zHQXo7P5D^@2QxuAKj^1ASbVX(T0J5v z^-;4>y;z;*obbASIYp`x++d0J(7WX#6|#5=Azg~~LZ!4xuHKoesMi5t>3ewY?eRa+c0dOw zhLXm3xtW;f?@3gtNDPg65_ev5P?Ab}f*g*$WKeDMafH>N3IW6cT6|QpX6H!Rd8RI| zvcH^;kiL-p1{(4)tKCuSDQcn`d~#^~{%{d|+M>~-ms@m$=^Rq69QnTfv3i!ElVC<- z$c4e>U{61UM=Cz$ybP)T;4mAunFyRolQ&aU`QKww3XKR{pwDh>^tJe;CTl$&|V`~3^{Py5Gt~ct1-OjtOk4SU7RYInra@ZIYg4!npq-p-A zJ0JgW^m+6dxz&jT+_YH?I(FGomj}x(l003l?a*EYK~4#;MNuD6grBVK97riY!=||a z+ZQ}>B2)Ls-~l&zKafa&f_xN36{POMMr_G*C~lD1mC3ezT;n{iyJ$@8_tOX`vJB9( zN|usGHCE@bT#5y9k9v;jWmD~1J|1o;eDH(OS18<IRp{*nVowP>l~;EJc{as|2} zT^{`&10G*IzIuG~81xwO81@(u?P%)|?QH85UE40;4Y5Sgk;OXKjB{!5y`DakA{4BC z#Xe+vdwuaKc;5p^s=QAk90+XvH?ASHJ7)or`=%C?6{Jae*;e0(h@H^ew)X$)Ju%@3 zS76Ml&U9YI=a+nF>wRBaG>d`^X7t&)RX&9Cpu~5uBZl|7FChi3MBC2^9ZwRyUwtUwygm0}T1rS^(G{^vyw?Bo2dE8=h7c8GS&p$jrQF|M!fJRuHa z+BZJTI$Ss;C);1B#;#ziP^(Co)on|pQGcxWxm0sWx7@TWoD4m{_gdNC;xev$bt?jM zK4QB>ul#dE;nydhWW(SwE5wtr$(OVRQuEoQ(a3A%9DWf^6DJ}e%M=^dej-(ViLKH0 z%gT?Q!l#Y^st#6m#B8B#DX=F_0PCU>52p?fwWzqjkigKuu)v-f4)TMSFCPS4{`$ay 
zcnFJRZo<2VFD2XS`;w%FQz@6I1YV=akWr`!mh@$M0}Y2dlB+lM>!sof&S#ib$4at9 z1`kH8HmXC}F7-=N3@@Wrf~0eOa4;gqALi11sXuQa>9Y-h%5_@25A7f;;m~H0swfp& zNl1R4+E8;2Oj(_#A7nn=6 zrlwdfnl53DP>j%wFvR^J0oo3v`wLpRfV1=O{#WN+HpV^JQ3VTkkRi-FWbx7-;f%5w zv&#WARSqiyG@IsREpp=xPM9-pxwsi+yGxPKQ+7>Dt#5(d^1Ew+VYGqQ=Lf%im`8~% z3fd(?lK9U9OMkTS$L!C5Be~5msLOsC@_Tnd zl0oLp7Q$iv{67B8FA*hkok>!vot^bYgJc_+DZ|9eJFdX(4vWO4BcORuW9+;`F^13L z%M@ky5*vBatoK^?G;v;x%gDuDzzU~d$GWxu==T*ejUwlBG~pNIWp-WRsH&(CI+@9t zRVDOO_|xE1MAqu}=gCnwv20xWX&tvfW9><_cuWHHbft0gm+k= zFXE<8lNvrVqGi$^p8}F|E zy6M7=ab~21mn&Yqur)KMVy7JNWZ{>@l>4yi=CNgBCK3C?*@|DA8!A@eqTa&sc*wVq zKL?&d4@-LO31~cFj+{A}&*B)}tlecl{am?#s?+YVSpetSSKM14<35i^|8@4EPgns| zAu4Vy92#3OkZEBfNZvP30Z;d0dd#)T^I5AezkwW9V;M5WD(8H>i+lpWTpYIq>YK>z zCK+Bv@CYyWg-sp`TFh4@q<3}9(HC)_CyTE(az&DC2>fjPDWF=`jd66jd9hhJ5$_*b znT9XqBbBg_-k{rLZ05`9`X50l8)xjop&xdEYw?cq>U!!s~;LuW;^5 z=371fWQpYNlqqiTJksRzG2fII&0{BEv19y4Rwpt+m!(+PRd2dT*D-Sr`bIj6J6Go2 z>qxY^&ha|*RvOnYHXDE3-$NY`^9SKXB%{orF=lzN=~aBum`|C<`^xMYI4GkNcALU! z4QXk?8komikw<~9+VW_+rv!i0fLr-Nb|;ZlOTCX!;v5~%0^hmtvl|R(sq&3=EZs!F zJClw!mkVKK=YMPV%ZU$aB@p`Q%+#kwx&$9N%(7hgi4_a2z8)hKIqi2kF8By2Q+JO9 ztQ;cskQDw(J*prO7Z!+`mrpbdNTh!a+|+d3`A*eNujMK4u(DKMAx`85fsnml_11W3@kexH$*dYc6Dn_;#m2^@ z`RnCd76UwBtRn;B8ja7!0$zijg97m^y`*yNR?ia75M&qafNYT9XIMcYv8;a0$$!61 zF=}3Kk4$-YZACCZL*t_+gYez|(cF23HI=n{Tp&PbLntF5pcqgDsZs=#B2|bWO+i{H z0^-mF1SAL|AOXdZl2Db6YoGr+l%ENwk5r;>M9v!FKUVrMyakdl zyRM&@k^0@BPt!(N-H`gXa>5^$T(*a8N&-6O!~ARms3s~`RY$Pi8XQ=P{+;Y&j%XDj z$$MIZu7-(I0z#DxY$qGuoM7i3KFQQbmJxJJec$Z%Xu_>;J~Q!s2iIPV|2b@N^VH;= zsvd9BJX&=&O5RJ2QeNzo8?gH9{8Dl~AG|G?p*YJlM{mbA82+1z{XpN_nJy`+h+F+_ zaG0YX+qRnV9!mGM%heU$qh4(=>*zk2wPM}K zc{`XxSOO=PnEEmAuzQG|fT!Yi3Q=j#V3Ov_)h|JLbR(t*A|z%hw!v+^IKr$AjOqpt z->4MG_eb%Z;ArF^u1t`orhE|MMid~l?X9Iqjl}9XPCi?7i2G;mD?*Ggr6=3L-ym#f z7{0*DR9#=`>>yO7Gt4t=GHzvf=XjU=8ue{o1h7(y=lYq+ruS>Z< zg*5&I4WNje+(7-8KwxZo{K)GwwyTqIC{i^gOPkp+MV2lrE2|n9Q^oQa`uMJX zGnf|ZO;cn8x|5l3>(?w<7T~nlHvKJJ*tsW7F-6%&80@x=0N2SeBcir{)Gd{Gjb5ZQ 
zhS-3L-as_j`^C5%-hmSi$Ectz{f!DA!9$JigJbhTeQf>Zd3~xn8ShzF;t;y0U-k?P z45)^shNrz5v@$GP?VFI+DoHQpF%XW&nBP=#giVELLJ|(-OdAoX+*m$cLLK=}*QQQUF&;04fv= znYKTEq>15xnt}%*NCKc0?=#hg=MvGw0o9_he(y)!^knPP>E}Rh7#l)H!iq$N#gLJG znyGQ#lkKtm=9S)K>enZARY7@(#8NP$+kLhV&hG*WR~NI;0~1hgfxumSc=FISCfQ7t zXeSi9?HIJwutlo%r;&i8XBWU8v_k{P8E|Fv-&Y>$>rooB<f(sB{X&{M2dzxU|?!n%Y`?9q1A@EhDRjA9q-Z`=B7HeGpK$`6{ zDm&J@2jkIruuH?7G;qgmNE8n+I6V+{P{k4izI}g92u0Kq$nCe(w?@c=8R>h6GwI$7 zY7YPs(ssYMJzC~7#xZpWY``{Z65ef%J!~>0s5ejoMm&$`3l|QiRsAF;GZLnS4Gq_V zK|ZyN0}vx&Q|TdY{`#5}7$)XF+B%v!=w{+ibT&dW7^&b@c+$Zrm zrWMtZK$(=A=!g3on2!KS<}Xto9`09o%s*m2DF$r2^M_a_9XJYch8J!x4p;aKmX?E^ zro)%YKu>W_s_9_YoujYNKxO#Wb2W>vE*!gV9r#j3Lf7SWR=nW=$W+=zTtyE-qbaG* zs)++KEJ~*{Ie`@_!V*TxggHQ0@|XKP4bnGNiLD=@h4Hxxk9-e%)wXP<2*E#B+G+R6 z_rcNUF*SjNf!`GqoO%u_sQmJ-Iwo3BM>tHA1sVC066cV?|cXnzi2_CzJyeR*`Wvx_il% zpE*IxIvW4>A&6O;_z-ZFRqI8>@Twr;!0r4Vo-W%}t)cfRFN||23rzPKi+cRqQZ5C0 z?eE8&&B*ID2*^{bdU+`@DOLVP3Y!+oG^i7&9p1U=<;5{^~k^yyyMd zmw|sBQ=Vr*azDIP1ZKPm5fN%Y)O;U%lf@XiZqnF>! zVs>_x^GWPiByL;^-t-u%^0v3*dl$E(68iIg4|n`Ann~>!ovf{(c%E6fc0l_eP+}}b zt-26_-v>=yT0?4#*Z6}0XY%)>#}f8?L?Oiuo7Q-dAzlH_azkU8qr`=)AH^{1!!(4b zDGS+0EG!~6K5%MWH=R2xhDvs3aHfYW(?3w-J+1Ax1#;cpLT3!SDtSoy3%ZO??7W+z zeLFN_C6NK}%MR9hi5jya8 zxSRka90xm1vtSb4KI`7ldo>77CyYAX0?&Q&Sv*2SZzf$WIN*1M-stE!?jfG+8BF#R zf|k626bI6t@1jBc^!%A{ZY}5s>`^T4RdZ{aQvIi%DIG{@n%3&VfN3Fd*bljvNzhqp zjbz~=0a3mSU`8zF3Qj}CQUyniSEAm}adiWJ?)F|a(@Aom;>bDRL$WMAcNR(h<|piY zAS#AxThG2(B^nqau&d+j0og<~rbUa{iyTLlKJW@Pnj^L00&0?8%rVc0pcg|ei)^jR z9b<=!A7eDBZ&=75YJ~A9l^@UMb;-Zitm*)xmws}7&pR$ zHAA~#D(wy2h<&Nkk2*RkVVof%I)#Dzo};7OGW2gAPX&WVTTMNni&U2`4mZdKNG`+ZG`tu=!!)`7xL{TQR7>n66^V$SXg`(PnzT#e}rvETmT)02YyNz@GibIzFV7|(>B(Mo<% zOHwWFNj%@|7H#y-IuGxX1JV>Vgd-0HbxS*35Qtk`dFWE5FcmmEhfQk96$99BAHJ1&_wLS;>294&>){eD=US$*vdZk!Tquzm}6#=T6B=t|D3 zints{NAE)npA21p+!0o;)Lcg%MCZ;?U5vkXpIKhAYw}TJ^UVotOKLA#+RE0u^rRDI z_a-wA*fpRV1_!RWfz<$>H8W}M3za>j@$OF}Ydrt`Ph+)gs|}I;rAg?aib|Y zD2gy#WJ5|;h#ql4xoU5=US(wb6#hru^`LnN9&D6HM6&e7+B(aPx*|_b!(3=sf&|gk 
z^CkIp7W%mE8w+>qz@qckdYYYSzOww>b@%4^<5hf&G1qLTRLb{6IYX-wk3W))N+@lz zZo2LCtWx5FH_r)fT?KsxairXShRugMCy&DKelmZYRtLLsCB$|gc_^IPn{G{>DXZ}N zHBeIjJGy;f8J&zys=36qU0R=k;`1HYRfmT8J?n4DR{UI5^(m2O+HS_5vuH(dI7A|6 zwXV0Y>{-9}K6&otxfy)xo!&SKjH%bEk~rq)4#(zhzgWLnytg2=GEgoPp>6G4*-dOB zvRFZpk@>%xTQJIjIyyujjds9yFs7)ZH{$=-myu=%3?w<(mQhx*m1E#zpnFZHNXsGo EKO-m_e*gdg literal 0 HcmV?d00001 diff --git a/clustering/outlier-removal-tool/images/Local.PNG b/clustering/outlier-removal-tool/images/Local.PNG new file mode 100644 index 0000000000000000000000000000000000000000..4a1580ca04d22c4bd0b4098a807f44cf77440ec6 GIT binary patch literal 221024 zcmeFYcQl+|)Ha+5(V`4vbizcm(GtDJXo)U_QKJUYOY}&TiNPR(5H(5=5k`sLdx#Q5 zi55nU-aFqt$uH0QKL5Ude{0Dax0SiiIs5Fh_jO%+pKxtW6*3YAk}FrPkUhAs`0&b= zYrI#k;Qb`J4*W%E!^SFT)sc%Ue+=VP{(M(6{!p+$y858p&_WetMmUAgfdCQkcr)>jS?;DZv1 zKzgwJ_gu@xS?ok820t43W}@urHSuhN?4d>KyR@{LYFp=H7*|Br z-=8)Gy~2MFR029B|NWUuwNA19-~Zv-rlR5f_t2*H|MrE+ytCbYxhl^Us%U1ZS5b^& zma8M5t6XP;ZCB2FfJfKGZ}d!(o?T2s8&kEtZjz~TFIZAUo#5Kql$z<%_9GO;#M`h( zU?m+YDjt2{V?%ejX>+`cQ10RgdUCiKWAOO-5Anl8DKL-2NMUp0<3Rgp`@x(C${2C& z{2L6yM9)w5npt0*1r6nCF*f5?c}p6%kT<_9$}J`;oI1YB`0a7=o7LH6Q5{Mx^`+hY zj^x(OeQ((wVK;_h(=RevF=V(j-wVkK{{DJ@bu{Ibif?V3Yj}U9f~cQUS2(#4P(VPjPgSf8{QjYC3896 z=NG4&%hUC>AVx4BSSXV~&Dv6Fs0mmj3)9~z8h&|7lPqi>+?~LStv{S@&3JKPKfOG& zIq71#@bimL%fX)Z~Xf@V5M;Xzc11$p^~e5 z+)i>B70b9u^cpY&)#2ayQB#+|^Y(xNayx8J57rTt&S@Y#VNDN~L^oy^LQ!$?u^b*| z33q(s3P+*#IbQw3rk#bJ-lcj6+tj`MYlP%UHlGZNMOP(PbziT|G|H(vbxcLj1RgH* zBu0sw7fb@P>6(-I{B8qC;o>U?fr6>OqmF`=oD^iPR-ONU?W`ef{+c)QeJC< zqsdEc61PQfiU%BFByOt&xXvheKf-z$4rpPQe9y%*Mqg&=0!9r8i=Y9#NcuYsZ}c@a z&ePM)MzOwUVdTsM@8z#qr7lYIV)1_H@csVr$re47+G#Vb>1Qo+Pt$t7l|rs9l$cu3s^f-7x{u~7a^?ss4%M4h z9cUlGRkr6M1+r8Vc=CB8SLAUltA2YHk={#tjn|k!wVYDds8huoT%T$fXbqbb% zQ*pp3;F(NIC7pPDEzkDuAD;{~^g^Qrjh8AM#|Oo0!4&`bK~Yb?U|P7#J~zDKXpR({ zG$fy^{ZS|Tm5}D)#?(al!iOP^=H}%l;50;dPy2R?|C$W!O%~DlY$RgkSs{^esu?zO zT$B_+Be{Pj4ZYzxurgffGBZtd%T{i6_@mXL8~x~PO*B{JTEHE+u+vZBp^{HlozXtn 
zC^7Y0JQZ!K=H@pioy8((c%Sq^v$>=>r4PZ>b+=s_j&nf?lUnH>ER6jQkq|z#xnY%S zCP?HA?ZGnh;_`XfR4@T4Lvzdd(z9H*=~RO`Yb+Ozwy{ZKF6nM>S0*bQ zM#)*9@5R15>=9`Eg^iIDjh0SB6y5dSFxjP-Z0Iw79s!m-0}Fe))7zV;m|mz^wA?3| zS={~H5XaKq|Gi4a>)TvSK#E+zkt_C_nA1;inQeaqU$iBUK9f$n@LjKkth@bO#A(kW zkP;q?zd|Tv1<56W^HsE(6Vk3%*rv~bP_#r|O2%R|(}M);n44kz&`~7EaxJyjZeJm$v!=h&D&$xIlf&CyQ4pI%0>X)n zJI}vXEo2V|R$Bq1v_R4Yt2*Ai(Nx->uKJtMNBgJu!pi}-T)%vo|#S`K}GV0%euaNGOo zleug}CTgxaw<HnClG zTC1E}cbH0A;%_MvruJEZxrK8Dg!sbe!jaQ3O51oSFkNm3b&+h;FWV#r8lxNHt`jip zy!)0VGDEFZG20r2TP9m)ZpP9S>H^uXbGqiGm!wM#uMGWub%WmW_mkA!g;W(zGIXbs z4t97Mm-Y|;kHajFm3Gyszq@m^H>B0-S?fMnCVRSeH=gHFca9yOY0YpBef>^1Z>|~g za*#U@%1rNBzVfZ?b((2Kl994XveH3c;Cy#NPY&0!mBDrtZNq-dM}!5*SmjKhLzXf+ zb-;I~&bQ2VrE$w=tQQRr@5G{Vj&AGw@FzX&luR7_pqA3|Je@b4TG=o)_bP0DX$f&J zL&QPfBhdGA+HRNCtgO(RwqqVWz!^4VaQXkiR$BrhQ$cg-$hnP#*)li5%``VkCtvVk zYmrrS+&YRza(vCT{kf&otUrf-l5_h(x97z>=XN97+}9GWGhiHBA?JzS&-%dY~&X6H(|IXTJx#p#$XON6Bg+J}skVG!FFa9%%WR0H9tp=r}XWpOXdeKu+<5~KyMv*YBPdLr5 zNX0fe3mGk!%~!&vpUUn_5-~t<0Jhd#;??wMY~jT$mFy%*WU#-=dddE(C9<>aK53`! z?1dm67DeNnn6X61Q%G+C4*mSzmz)d11O%2;JbxDI#u)a9&$+(NV^iNg%DDB(HO4F@ zj3RsfK*P4jf5C=Zo9YME1oTkv{p&-om^vqgBuhQbRuE-YV1lr_sP{uPNt*oa(L?NF zStIS9NfK1jq&4r9JH#%}$emm5O}`NlUS=R?JR7+6$Bafnx@Y{5$-C>3@W`}+B>iVV zV(pV`%veXgBX1vR2fLxgxBZ#rTSUKtbxb-RT+QY(_oHhkg7Oz;W-iZe4;|;_XVc9# zxHhRou&ESoO_tU|+R2b41f&z3Qhce*mhAdI>c*AMB6DwaxRgVE%;;n+ezG*Cj3zF{ zclMQwDYsPh{{fwI0)Kr^+!D!WFX1Ph=1R`eWe2zO_^rtWSysH8fU-mehki6f;|(BU ztwz>~$wGkjI>XKVjf*_rdkHG-@C6z%v*^!;op_-IzS zY19`00VF#~0Zu|jBFZ9wBu7#bgb#g*>}=WX8kQhQ2BJ>{B?(6$t4izAy+r9m=@=%T zHyPQ2^ZRpw?s8m$e~{|`wQYT=60u?~X4K8UjGse_!YY)b80b|J;mBC9WHq{AfXy~O z)E*%`lCPcbN#rY1o8qlK^mewcb@UV)RB)7QXq+7fTrk-bKVI7iI zhDd~SrYoIJvXT%e3sjK>zTM~W2K>p0UInq8B*Cw~oE507C?3<3Dl8uOD#Ip%4k~~? 
z+B%|pXE7Yn@+!lkm%N`|WipS*qb(vXqTLRJBud=%G0gAZZQX^l$+M{l(VLdQh1&I` zNm$8`x;#2Jmwd5@TSrwgxfmU79ZHGse1B-nHuc{mxy)475UR;L^l^Fll`U9UGba=; zG+LncNeO@J%AgWfcnJU`ZmB+mxeP+*sI0J9D#;BnqvuA{JPJY5%Um>_sbCbsdi#F`D zHWDN;0Wp#&Cj6s+t@q|Zz{yO@iwKbNN1ux%Wr$IzkYwAs2XlmMx_`Ps`cT93)8u5x z2%g(OSFKD85X)32|KdD~RV6M2;-)5HLbR(-2hk1a6-`xIX3t!R#5Iwf-M@G*Z2*o8 zEs8QXqK6WwS7&WYaaq%mxsuea*bU82*LoL+BV1)7Xal2H-7}7!gT+k3PCMuP1K&oA zMox^Up~*#KL<+WcCtpMmI%goa`-I$+cdO8=uBL>XX_3Lm)A@ktIV3#fw3z{-jIK(B zBbSDn?Jd4N^Wjg!ejsvVX9k17%##o#Nz71lc<7c%$wepqcE*;`R;`a_)$^j(bN4>E ze(ru60=mYKjQWY)d?1~*G5G+)bTzUY#vc>NP zXm%8fjNf~J#M%K&>Swt<4z|u_$H=z4;ZQ@HXA)JrFOZ13OryWXab_D7o7s&Pr3q;^ zo^I4W!4VxHB^)wTJqylidl4izIW%=C)Q7zTENNjn?pGU0HV2rk=Ph6BYH_1;D2bBl zC;iU@r8a82mpB~*psX=Y6BV-n&m61v;0!$5R@vK}VFhrla@mt!w3{DJW?0F~z<6(( znbdfG-R1lK2ZySjHHGKn7m~bORiG`59eIJ?tx|`={;kY;vI?h;SmEe@l`Grgw|7=q zT)?<306G(Te&Si~GyyTK^=b#`6xuaO!sGUB%~aHEYnW9_C~=aMcae~044n`jfa)QE zN3(=XGJeDVYh3O4qo{pSs6G(jg~XCZS)#yWQFuFd^wJ$jZ<+H4g)s6XfLaRzJTs1^L$3s& zk=KEED!O9!C-{{dATy%4|K@zcKOY;cRH=w?A`+ThR7nr5gFcsj(#8I`ZAK=*&Flu= z2R&f4(tBmu89w03k|^Tzb2#UX(yO>vUcZ{I>LzJfz)yF21ik~(f$rVbxfg$S`91D5 zFI>z9fq20n+rCtxXFRaF5uq9{PrVT^@aU={5Ev;7|V97)5g?^6L0 z0`u@16bXY;*c4t(M(1J>jrU{O?WXG_^oz~b4UK^)u`Tj%)^!NC!NxH*H2WNma5|v@ z8O2HQk7j_tk@uCGi)2eb3Jr%4K0Wn+^gpV`?|A+IcR3Bi7XaKHf#`3(Io_RQ+B7yB z$!)Iw!*V{<-n@iEUzkiC2(5wHz@z|F&Pr8|u+T$h_j^bu0NcX=2onm4{H8cr!|`YN zSD2MIM)KM3HY0%JYGcH8Ab&#MA%wG~#j|)I7lcE*M+(6-ykMR+Jb;Ju*Bws#KWXX= zx<;4`S{W^V>=H`SP87Ll*hpneZ~*!f3ONF26%mL;s-MDHxETE6KE@L16e-b1LWrCf z=SBnY1poT=@A zTKi)=oeK{62pZ)_{McVAbgXn3xYdi5pzmFn7TaMjR}vyGyqrw zxvf6SAnBQJulsGz>*rfOW7oZ99mko5DMhW@>PhCxrL1y+(u;j*4uD=H<~FDJEnQ}! 
zkX084u6N$gD(XGyCA$|V&p-dvhRu|ALVW7|>QIto1)u&mlBSLia=(5R$+@j;S`k(I z5mY))?>XY4rSIzpu9nD^$G{Q4tCIkzeNg{{wOkw51fECg6XxXCA6T^^@vH&SMp^o7 ztCix3wZGTeoHz`3B$WEnW&F`Z5q}Pa?|>^i516O_U|S*C4Dx3GOVp6oo@NyLd4_U* z5dR*lHs0g9AN2mTm!ZF)VQ6z)){4jH)o-}G@b_FB`;N|p&o;kpj@HJ}AhRPtfytE` z%XtttP-d$IM41t~Jc*c|B9m%Mk^`#Po4XJ#eF=-%#Ze$`0h(Hjn`2bcspK=X1ZErw(t1p&)V+;@dgZ2AGfR- z1wvZyO|<4oqBjFwZ7>kkde3@-{UG0qO>6&-4){GO^?$Z5aWuI3RNWJ*;Z6F0Tb(I$ zZtPyk1?0_Xr1^>koBw7x*PT83VOD)`>q$b3cL?KL*OZD3zk5nQ?;}x{!tw`ViB&kkZ(H9iE1Nybkdb?Fto`NNDNSktsP&q!-7?Qra3^ zpF3H+8Jap88~_-Q@G1Zq;JByH!-_lbbX=!92cHubR@s;gD8G}PA#H|149rDK4zS~wY3J6#J8dVFuznq-Vr9Pr@ z3y(7#^;YGlA=h*ld_om+8&WPRzf54eC>h=&!{U-2((48(X@I0kF4J zcmVtf{ku`*m-HyZw8>?oX?Pzw`@JL*2y^otZG2KF-*cGtn#HXBcn4`OOwV~CjKAJA zX?Q5gMq^4`G@r1j2ti^}^*%%mpxn0v0o()tBtSm5OPyf}@sMAvvXd@h|Gae~@1XC@_3^O?IPooybwFQKU> zIj%gS2}yUc$n>oB%oh=7l|*xe8xX#2UNW6R47EjBDV#_Baq=wV2!1Ld zjcg!oI{6$lR>nrjIm*}m`)B~5DU6oF|Vkd+TOz0+KVxJ**EkHUm6T-XOpSNDK|{F z0UtG(N}BA6nvv9RFNxLwNm~?xaKTFT3;+x<+PHF%rH8KrsEU4Mr8u%0FBYzrn%w@0-%D8= zm|md+TOR7|#23ErQRM^DD;D7kyG$H2#d*j)Nn@V|Crhx)JG_m)n5@AqxZTk!#`;&) z+h!DE`eI(#`IjtJMc69fWKUB6^E@sc>|$m}mEfnj*-Q%DD-VoPHx03qlE zep{FlvS19E?RN?qqq)`cboZ%=45m}j)Oo6EV;q1AU=C58k~!!f^+z$v*r156+1;r} zIn}rlPi6EhX#E9$%Z>o&uTwvQEq!@XW?mz%p@9}^cG$xMaPyp^aTXN8gtfgs7V=0^ zCvmT|ms8TXYvw0>TXRMGBq1PQIQQ0XG+zimbjs&b3}fRB`ylAhw9;0DjaPCKF7L1Y zkEtz*RuzLo+@7~erZP=3t}<5lCh@h_LKJrwW8OPrt+@syh0@oo*+Nig@N*?kL8BH; zOCgYeFz0XhRYkVp9(d^nyJd}$k3PQoXCugU5nsH!uSA+yhs-()KFnB-2HiBND}*ir zQK{|rzjzl<=f3(D1Y=w3!$s;;Q+m4&$8qUZ8JNq%>oZ`@m{GM`3Xwbz5|OgkhKgz= zAc*?<9d0ur8kn#O1~Z~mvEWJTpPKdIRDnzK7qK4miL?><3u2e&i!S)M&f&Wrz+BfETz7N%=;r9zCMwu{=84XJ6pFsBC<^^JHMK1E zEvh5LG=FfAqMgyGYv?wl!-=^6$k$n|O|5_hQu?J#+dH51ZXodh5ipi2EbG6R!Z8ic zEOlYU_=yO5^KW5enzEFdPWCH&s{MXIVB2V8T7KN_?z^WNm9^d2s&Z4isozV|H29SN zTes8f0u|)xaJ2d4+c8R5DRaboz0xU@=jYlkC{h=PP@JQTX#aJT?)s(4FCIiEyhK5S zc$ZcH`Jx~(;7@Cvt(PCGlc41F>Xsl%j%-3^uy6I05Czy?!Q(uEZiQG7oh>@y^QR%3 zdK+)LF1Zj*LRTX`Pg{7mN><+`8Wreh-G$tOH_x!Xc;sPqwbN-u7vl7oH-x7cd~f_< 
zMW^Z=l8KqmddHL6%5T(E#bn;&qo1+QLq(lc9p}Pv$nrQnFvkVw|CwXh|4q(H2?eO{pF4?(Qo!>j%%E8)-o zi#YC^I55Z&Xm#F#qUpnyhO$_=;n1Og*P((kdGPYEi(+e%WVmbMoT+QkocbM%?j8l=+osP|6B6Sm+c&7*a;Je*^BS+xB^JJy$#zl zF%XR1C`%8UWp55-4Y%O`G>{GELN$w;#LD~IqD8IGsCkUV2rS!fR?@3)gDMW9ENbF} z0^e%ZId+P4BVQ^V%7?b6>@EfI8NZ~z`_gOOwV;5pC%I-@{;+geI_pLOh5ywBAPJN# zhR5cavO@=2JZY%!5*)DWxj$16KG|n&nF#r0F}nC6T>uAaH?IHBM(7oeomKNurm56y zt2;>ba+`ptn6u0HgWFwRXMkYavMqBOt{6T9ISc+sQTzUfAn`9el>V1z*IqcAgmRQk zx47&_{OURTDwLyVIb2}R=Fg~QAe1hrWyEAmdp8T7xnomZIQFBSfWPSR`Z3l2_ELrR z2#vx>8Y4Ig-6zw1|?W6-If@LV97K=)hsQgsT39(&4tSOCaL??e0uz5dyW7_RH|nD zA5YOPvGnM~JFTVRKMQzV;_S}C+2uR=&b2=q1VyKp+g;WB^n@`_&Nw9lzk;RW_!*q}`NfGo@arL%-gIH_xuP(m-c--lK4$T)XXmv6kcDJr^-KamW6P zBCP5b&7Y2dU1dV#nSf@f&FBQYTdVMM{N|F?fSaO5~|JOTyi^s;5 z8T2z;Z5}QhIOx2P!`RKR!^Y~b?0S7`ty|y4n0MmAepVB`SUZEZIO}PKB3H~Z*X6_W z72DQ#cfwO+xHH-L1R{^~eU}Aa=rfQa{^40g zW={9gjWNl+^cg916_?a!ShqTF$e^xT_OSm4?WYg90~SW zdjcg`{S}mqUPThogSzQ_GX=NN0`3!I*Q<^3o|*>4%zn|<0S2-B4{yeI0S7y;IKAVi zm4lE)J3jd6{k^1ezST!UlKri1(k}ZNzaO#+Jxkt9My1+t?#!jH*=(XTC~b|PJl;qs zgNUgP5MBR4WGV?wMI`*gs6ahh`+=P3bGp3s3hix8wL#Au1X~tSJhd*ld` z$Fr&;z@QSI&2;GfXKN?MB4fKSJyr$ zH^>*Z{V6;)z{mX->szMquy8w&n(QqJo);$cY6pDVjv6`}6#Qn5cKAJeWJ6Kr;jorD z#M~$qWAmH!&yLC%I{;F|hXM=4Y2m(<7N#Ua^{J?c%D4qZL>2kyGt|>ex4Bq$P2~s# z2wT2;*)9uvb*Nt-rt!k*M5|3%XM-@6h*Y-)(Mt9GX6*E$A|i>Zg8|19L;rIa;GJwT ztGc*%di;B50b+KEId)FbQ9RAw@ir83osZ5MKx>m);vmQf2D6;rz6XZQ%3RCr4aZF#3HEPMh?!gXI4>xqbmnOO}?YgQ@VGdpk{e}hCAdqFor zPq%QLQYTut0M|Ws^*?OJqBti>2E%=QAusVVhUg*qF+9_K|2IM5~D;)bmm%S7XzZl(V z{>^{LIpLsE*6x+G6!f_^%wIkCOOqL3kpJorzLeqit8;)hPEW{JP<=9;Yj56N4>*^0 zzUO5r=}u!T#mZw%!cUEp8BDZX$sviad|9|2iqr9Efa;^raarDZG}NKeck!ND*iL@}2t9O#++Lz;y+ONmH<`k{kf5()JW zRQC_HC0A=L$_i(aw&e97M9E#sPhR-!c4{{diYeMwnB}Y7gf5hBr=mz!nUj>;DtB|q zgEfVEKZg5EAz#kmZ7mhih&T8{i&$h(DF($p6U_fBE016n0vL|G~v;DYTIIFQaB;FK zu=evy4+{0@lfK3J&o8)!L!rQnw3{8>o{F@y=;--y#XujBpFP4Tr>5QA9BaLbPMi5%ExO=WQT1@es@(i5)+y#7shBT zIyax(j%c*mZv4V!Cc>4P8{)vuvl+<^ruGwH5(va<5b1&DXbzwd_(civcMo7L_I@F 
z<2jMSv;)E6ecJT>!&c}NU(RnUHbXg5?q$V;@-00T(jPyM=$q$05s@Oh-&ve*`LB{J z%N9=lT@qLz)poGHZe}b@lmud!Uh&vndP3}41liZ^!C%kOUZmg4O}{L5=Z`qbZjNQx#=>bcsY{gBT-qXVBu$wW7#egV^`u+ z$lmd$Yw2P4f!Uk&dVz@{BDG0M;}G~xL!)kx%#xd7JQU5szIMCPz%fJGrPJ!54yetw z-3E5%4Cu=ux^bhe(6DT)o%S)V}wO!F7teY$K*8^YycX|%E5W+1~Vt^1hM5|@d znn^}9xd0+Cvwt2ti#kLZ(c+qG)0R=BfpP>x>qO~88XM0`VitT;IQvtGa9POz0kA(n z4W(te-oM0t=(g447v~}jB2U4Yh|DC52)a=zpwrdXeho-|)wEkt=lgu};w8#V*`I?D z?0r|(lVgp9Y>5KC6?0M-*!Zc;*Rpr=zA5sE63%`rxVHw5 z6?)-yrK8e2)9o4QxMWua6NoZ4ZZll*NVU+!u!JsbRlf-x@IbpqCXw-HmsTaotzMgO+DJ~ND<%WzaUH%r_~ zlCmDKeJY?^hp1Ik-)a4>-CP`K*kHqLAc-})twt3c$k0A&nLu=6;F0XftDte}p@Gvg!g2No%OP84b4hk;QJQLJ@DIzHmitZnN@HjBj5FE?Yb1!((t&9ZhI(Wtahevo2n*AwBi1>S}sc)gL?NPt{3cJXPy~m zolnMn*@BtA2j7VxoFpE3RwSM3p-QQGBN**ASwU^|(djj3@1qVI6VCKml<&xgBt5=l zbq_Oj9P6F+irbZD4TLTL95?IFx;TJ!iHlD);0mfwbvEL!i<&oGLDS#u$O&=+%G9{N zBY4cgZ)|yFULB$!B>By`;#h6pe3>btAI~DbK=5>yF5n$XsH^uodjePnfC`9b4XXpKJj~fc@?*bmBeibvlluY}M#z?zb zwgZ)B{44y4X9V$bL9_ILr*o*iq~U$(H1V=F-w@5~9GPf0;-yrbl4>i9n*<vSe2(K+?lQ-kLB~p-mN+Z}7d8n9*!n;ahdZo*DL6w#ilwHSKW9 ze}{yx38!u`-S712wPqewq{X`+Iuc9clmtNxmx9!;c>rDm4GgqUim(Ej<9Q6O{~je& zjw&TklMcyh%RwRwRvRI&}&MvPV5UFo9Y$Har2_8G~KQc+pYih8_5z zMl0+v@~vF*38!ELIu@#u-2V_0%96psjstu60NC3W4n8PSYC0o!t=IoGW3rdu;FgCS z?16vxvKPwkyDKjJ{%(qy-5mHAl8L z-v&~@LfuUEch;z0Uwek@M$Q4Y#&D(nQ|Q}ftQkHzdtvDPMX-GBZtxL1S=A4`U9!O(k- z!-wLa1aJL%t3h_^C~(lKuPE-?0YoSCPs7C_^$uwoCS1J`Q)E+lHfvD;Aq>qKc%Nj> zLr3rpIA@MPr3wgQkvBB1>245fDk_CWT%ygC^N+|>FL4&Db6WD-dDr6fW;VMtaO_7W zKYx+&?D3l_?2|4H6XJ$o-`b_jVcQQEQ1zC(Dur~JcLrFESF44x%NWo7s)vv`WIBq4~TwwWyl@@=lN%}mV;D31`VO4L$G z)}ZP$il%b2orK_i*=91^#KidP-@SSkshHtxU1(ij)aSm zeG7bG7e_W|t+kTKn_H{5sC4dG?5cMva!qhtOD6NOn9Hj#aX73IkIDCGJ*cQ}_>DZv z-hrM|f>OMx-lb}(=rU+m?K_KGVMR7M;2=X+dUNcFo4P!p)ZW`tw`-utsmjR0B_73{H{!j121}BS8 zNlTV534}q3;JSDXvVa(f~vzsgdHxo5y;4fDF*a(Q$lKaM%b+$d_E5?&$Bb zjr7utq<{82<9fyVrjw6LufN?+<85V(kCZu>CvC~IBkVCnOK{-6P;C&Y!047jZw+Lp zf+lmY74E(?2Bm&lDji#xqA0s-5~;ZSSXQXv=_8_`nfegZ(-7C~!Eu}^JdD~px}Wn3 
z`OVtRvnI;ztllRq1BeNTReimByC2~F2WdYeMF>a6_&(a3eJ(Im?`KFLbQbI)YKx%O z$>cgde?U4T13f70v;%EM48Kv6yA$vzqkMUUW%xfPy+>&FB-8OC|k$JUXq1t z?x@c2`2}n&388|Gv$!so)QP~RqFc01}q{Wuw%siQTBIL|(8vL532UP0B>)dW}h^eLFCD#m9B1Gx| zHP_vyNSX+6wC1J^{2bS@94Y%V?NdKyr5pip{(+8sAi|8rO>L`R|7 z-Bi{C>Pt0wKZRs8BY?>ug0>^!DUDmbnG=2tlLfw-3CP&8-FLj%e@PA`9z9bkpbZ6T z^KGP5g$RpJ7Lc~Gq~`$Jp_2=&vsW{z_j?v_vXb|NDdGI|kTKf_QDR9jQfO!udLt_1 zov=OCD+<}ziCJX+lPHn4vN*v+GYH(-EN@Zx~2WC z-B%sp?0NhdEw8K?w^~n%g|=%Gx4ZKT=yb$LPZBBys*;ge8ZhZ^Kfx?_2^iW{^rENK z3hCTNBsfl@K1W1Jz4beon=U&p)}X61r;O0zex;&p1E&JPZnwbMRIwL5CA#k)=0C&? zuIRC|1&_1?I$%c6-e|ue(YGfS9=HALb=eVp0}uVwb0s!CYplgnB$)Xh@kIXm$jWBz zFsX>RkiOuoIh!QtZ#abwMr#6C3h4U+uJU{WV2oUxc{>TR8_3Jf*Zr&&KUjZ#)Ag2* zG2QPscf7!37NFx$ILihw4nP#yfg4zk59@*Jakgj>U4`J{8Lklx;Ei{l!EhadRJ2=8 zcJW59r@yuZGt)*ATL_YPhKg+v= zAM)wWy%U9$KBLN{su4v(m}5Jh+3!?$p5!X-I~xiH!l~CLtGml+57T!?`xu*dE(#8} z2IJ@MO9&IZ+0#06(YRV%Nu&q#x?Opgzr|NgMGU$@La)O>hwL7vg?;O?ogK~NJ3G!t zno<~#ZQ5G6TqmuS=|GV#F@JkaM|k&k>-J>Kc<{oHS!GO{Xr}I@z79Y~bEp{=*LuB{k1hry;yxcJzBezZP~sl|t<*?PcIf3IWjM zcO&jL#1sHBZt~^r-945H7hi^<522k^Ok7eQgeAW{MN+)gXqnbMEaLdYZI`&Two|Pu zj}ZZPTAfGZq7e>K%Z^eP4*-;acVwitD) z00ufJyZWY4Lx99ve$%ID?#G%8V?YmPuQ)I6W+uU&CIemG@p!%e;T&*5%z|Vq8<4;_ z_)%UAbV}X4mQcr@Z7*`qi_E*ZMaOhO{()lfg~5G)u`y;O3DAfc^!~%q+7!$3a6gh= z=Ir&yFeF&IVOd!Ro)W+t#2XCI3dqp_@ZxaNeTz*^LMdzE=lw6`kLD@0Ofzu2;FrY}C*yb@c8oxjs{))Z{Y`ol;x;Ra>l_ zsjwN~lZhmdBX^{GcQ1AUoqTIkVD+%#wT^6@{DsDhN?W+xrABC{-|Y^MbCy^B^Q3k% zMR&|n#^}l#>7%l@1+%i+gba%DI-{>Xeog@9^^#)#=C9p#zM`1wb1~KGDL?`O-HMH% zIUWViB??5PkkVcmE$%EpxVf^!CbXP8lUp6oZYP0J`Qp%Bfyr9R8JA0z)=RqRy3HlB zxW$gE0@w5ja56Xx<=?W1stM(a4{@|k(~U1^?T><=@lN_TT@<@(tX5r@&)A@Ph_S$8 zz-pqytUbx`^7ks=*ezsG9Q)_qg>#IjDZ3xiAywZ5vOlab_Q^~@QfcskL7BS&jHD8h zU5)Eh*?wi>!*t)AD~#XalKjbE=)W1CO(y|F(rL*D}k=dz8qMX5p^E2kn^ z@a^XO2k-1{wC9GXl=E{KF5OQd%|*vg4W*JOIC`Q@lhM4j7J;mzUsf<|-6 zbHO4e=`LCYPwD(0eS;b$QF-N!d;<>jjena)H4AK-+hpi@5aOA2!h_>1?dp^MdM)Zq zZ9~x!*3Am{OeHHhu>BM7(=6>wB4Vxgp`t}`%Au-3Zy_~rE4KoV(MQ#fh|i`osREr% 
zh~&O-MfjfoHs79?AKN(<6R&WgE;5mH2qlq<;TCFh+mU$kHLdW8Mpq1rpiKlHVvaF- z*>5TB(v#3*?JX4XW^69xJ5{c?(pU`wy-JtKoHC{oxEWxn_5ce-%UO!Zfpr72<1ko72RUdgU z+6h0<4A;)JxAb(`5_}@cjUt<3_|=t^DSq6SB7SBh#}w#6sTs)hk~DIW?MU&=Vb*Vb zP9k-TCiMC0wGyW|r&L)`Io@c6Gafl}=}dPgExX-0W9A19Qt90r164v>Upc+vk?res z6mz;cY!M&`IsVtHY%UZe+`*xXG8wBoce!-0Bg(l(NY|(t0%BXb1au#z*-C%&(8@M< zV+gIo37jn8`BA-X0$jg^xz28k++0^%?9s-8GTqn|fw#Y$^{TnbeJ;JEpEO0;aBOK1 zyPow_-Df`G&#`o@kOx6D`w_EEdtb#X138K|#)4OCD^tEN7dy^`uAX`^W>UGYC);w) z0cLB|-0KXRnxc~!5Y@c=Su3kqCpo}>CC@xuKyhw3tR&}wQ}Y`>M(rHb6n~Owl@F^n zrj3>zWC7HRy>vr~dXFzri@SHZj3Gx@3gi4X%hkEi>(EB^M_&*b9j|WA%A5*w&dr4b zFb#$Y@%8%Dj0MpuK4;x(sQM!tr!@QP?wloabV@zizGkwZ zlG4Y3Ab-nq-rx~#kKzgGNfJn}otISm2?acq5~~sJL$j~z-c-frb)ChR_GO#$fgPS? zsu7TGigZQIbRKq0Z_2eIXI==w7e;(%qN+CM$98+2?INFgvvUI777rV;SxcW)Y@t#~ z8S*k#lbG72q|qCE9={H2l-xQRd7aNHD8+pTA+W;;fFqzwwK0NxYuPbt<$@${kKlSk zlUxto7t4WSQ_r!XbI8P;Xo3`#Q%TEA#MM7Flkrc0gHcKDd@4A?!&RB@1NvJPhL4C) zj;yJmfIr~PD10IULLv$@<_=*!L$PQ<8Z@`X$2Q-ugT3$|O>Uh}yV-POvxeWEKDr^V zAUQWVxf%5Q^kq+?%nao4EYIP4SkVNsAzJC#Sy-BUP|`qW6vG#o^lkPvxuu{XwD8yC zuM@*9TUn4r{oW8V7mDUso5I<&hH&c32B26T4C~(OdD|4mif3FgHoBGZ zLi#Zk1(20Av#+>VVPBFnJ7UWZ-J|jqNks`PXcdOM(nK6G14&y%NEnWt-Zy0q0EJ5n z=nE0>W8kKPFJ-3o_(if@pxv()%i5(xF#c})n?;DMPOY&Ca!&7BNJnBuw}JP^0;TK z-X||9p;g%XaY&la%s!5A#_JL^b^dMp3He=CleGreat{oqUxoDs06^+^Hcs{*?J=ldfHjL@B^211?~F z;o7rD+>b@PEX=t{7Z{9uO5>1vo_)~!r+2=C4KI9XNs%eWc7x)4(~j>5xaCO-Z>OQY~VfgtqT=had5%7Pw$ z?_UEQHi(lS`0DmLS8)B^GcE9+8<01pKBCf@?3WoPOmR2n-p@N&Wd@RgcxZ;87^AC9 zs}lqUn^~C2@O+aOVpnVD;9Xv5##npKJq!LT^|55c8Zc%pM z8!re5G71jDfTRo}0}Ne*lp>)>=MYLs=g^JB$N&R~v@|H7A`IQ#jiPj?bV|p0@cn(y zb)05So#%=9Wdhc#OnQMM> zb=lY+k;bDnPapO{=QY0dZ$L8uL`a}JEjb^5F5l#Ix@X0LR17xL#IL!gCVZ4FQ{p`- zIq<`8U{yukWo?2eSu7$Dn_5eaP0S!zR9#Aa>Ja4+C(=h2?bjZJ!wf3d}dEsnKE=OPqMIOxsNXl*XlwK1+-JKxM{<`!d9>IUuNP&+O3ju0U?zq?*0F^g$zuS zCIYEbgOIE6Ap84R^4&_!CmA`^r=)Q+!Z1@x4#6a4j-H=ksvZU3(K)~2*l)4m1|fJP z0YS~|{D|S~a{GhHFm!2-5tx&jqo(Kp1@i4i!Z@Ut)JUmLE?bM41c!sfxdOjvbAcAl 
zs*9W#ro!W(Ow>)e>r!p?0%usGUGs%Bsy|2ezLb0ZH1}^lymBaI=V;%Dou$4G9^Qu{ zy)<7&l~IvCs8r!$L7H$i&y&Q}c?TH!6TSAO>P!~NAFb4X84Jj#bc^w*IPd@|BnW^H z9Q6dK;UsUm--J`XCZNq_@}-2S-S9Xu)5po&%e7%lj~j&OCWr$chZYhB9?r(qhetm* z1@;oH8W7JxC!GJ{Iq4kJXz-4FV%Y;q%3P`2ji(CSKF77_kMkQA>$9teOQ~yA3ZHPx z^P&Ba*S5x)W*Snp6XR$ziM54aolTATx^6mo$obQsPg3%i+sQ@Yn%_)wRqlhS#&@X~ zJh)ZLDh8MDhz#3Dy+e9x*~%b$E|l#L(njcJ9L^Z!dFcQ8oEnnRPrX~6iISPvwHKE- z0E?+pylQIxyM&~0tx&}Upx@qr1j^u2lEs_szj%w3F#)VxnY4}U`A&81A+2hIW+>sOEn{YRF_6}pE{$GubACIwZ3_6U2A%foX?b6{ zYL!#lK9~DIoLK-ub134zCxA9T1R7_Q+3eG29jI68UG-Z2JrQO#S)LFf_aEiz&CC#|jt!e#(d<^im6Mr;jp~gtu0erwsgbaYyWJWiT zT}N9d|9eC9w!~~%%~`6Dkm*8nHc)8(HaWyZXe2L6Z7yk=sY?YKC@p(GES5#lJ=(^h zFK>W-rK7ayx4 z_hNZlmGF0mN2x`6c zR&eK~Gkq${`i&yrZ1HdUwx%Cvil(6DJX1j{J>US!rE@WyN0D77acpf2*DKQe*LOB1 zZy5mUCCgt`tP>=tn!c(WUeToP<|dc;FRSA$?KUkoF4_ehLRD@e>0J@}{Ro77iyTSa zt$iOEBd`QD@bTxE{i&`)|I%fy7_qgC|CHCMmP@I9HSpW_(2-?dv07_o0K*328F-2GEtxv{$bgFsyx3G+MpV zk|&oWL*ISi+9{Lgb^hD!zgV}kmrC0`tTlzbrA!_m6GY(iT*8tO->zd{k(@S_6id*jVaH=_S@6?ckrVGY7ODAk|~alRzdej^*mOH zKqXH4$fPz1yrLrRaTymSD4wYSE!75x*0deg+4#~15epB zog$R))bTEZKMT@_EGp;+H0QVL+MAJ;SWb(mSNEvCY2UH@aU+qp2?GD|@D1ugU{6h7 znP|#zbGag9Skl~h>>dPS5#wK?cf7BruXxEdzxUOUmIXOjl%1+abL;wn8wy%144emP z=1-=#YLf}Ct0gX*{bLzKKLPi1G1HQDP|>wJq>Xg|6XWks z^ELviX)a+(ou6R!W-fZGcmRwTGnerK;}4Xo1j`A1Ffuv8A~il6rI4F@yiD1;i!vbR zF?_o{5iiFB)~afaj#9lXqm~bsDy4TdAAWlJV>Q}VvzR8FDx@SqWntiglg|u=XYFgy zM-s33OubF9wDVMsvb<<8Ckt)Zb|S9d7D4wi+mQ*C(Fc?p8N@b3j?(=|1#L+dd#%VP zX%yZETL7I-L@4^@JV`o4{s>8XW_KY6hS{_-zuv7h(~}ptC`6NR3?qJjv(HR6pbLxF zntz!7DF`kdGQlszH6TS^yL%I7d*5*_*21C%D1gvE#=IxmNA3P*9tE71{JM=Z9jt1! 
z{(}0rC~eI%z3e~)-jzH1 z1#@Qks0!uoh$;1lR?6@N$`v`)1x^?+ky4PxHL`J?LswRF4Is;y_ve4DK;O(Hi5`uZ z2D5%LdOG=0{QhX^YUETCP!b2a1f7NH|E(X;`>yG05@v~SVDWEF+H%buC*+QCtpo~E z`l_)$)ms|JR~u0|0?GgIU)?u|YbfwO&xvscNu0tR_~IW+W}%_cX6JmfGy$mZ)Z-UB zR++J*nk9(Kw8vw%+sZ*a(_8=~k*jR7QaCev(ckt1nKEztYQvYbBwaAgeM~2k!#{Vw ze(w0@!D$usUPJN>NxWklX~Tk9(Ok5cnaE|8F=5YXkPX&|4;&93+xYclOGD<2E`YCE zwn@D>lmt&_@q41fvktFmaiepOiD+|U*Tao+Rb%(O9r_j3?{B^f=2()MHBaz{ew!4t?cA}|2Xnh_Jnv!Bm0kQ)UeRXPJV5R!%PhSp zqbZX|=x%8d+>NCk`pD-R7WUkj7}4;$Tw?gQ^xT%8<;V;=N^%dNA%{`@D}R1G*4JCRb%*xE zZ~e&f%GbMc+I6nzN7Vg)*Ott+qQ_}12+x*lr8#Q;7uKUox>r zuEz7w4GPytsE^Vf($CmQq~2Maz5RSUoun(V8j|`vJYOC>_PG`fXn+yt7Ofo%*cj&v ze*u(P=M#f38U!Qh13|Oc`*)kAk9O#n9kI=U=>2?GMB+4rBbljaX7cJX7Jc87>oyM{ z=v4x#xD{&*Q}p=8AZ2vvZMzXQjD$o50aJ zBqb=^ojFZcs@R4aXCCoNhu`@Tuj2#ND*m10g&$EFG9$1*H=u=Tdl1fVOcjL#166Pz zE)%C5gIovM(%wF+T(sQ>o8tsb2PgaD<4=ml;iIzQGG;TlY~9zb!#xZD$2ZtnnDT$o z^&_%M-pXjYzih@*>Jxq3&ws59Xc+F_IUFOp;4R0Y597ErAuCsH@err5h1C)H6cton zAIV=v@5&mu0|%LN$VQ@20YQ1c54i>Pg>7*6cbeP$)LOtJ2}hm)>1)~j@r%us1EQ9m%3*RZZd(wBkxpTRYR4-wg2VHkSYIKN2nea z7Q7b2c}dUN>;63@T2poz3l|$?k-|sF$J!3J0x5@yk$WAT})I?8~Rrl z7%t6h(1D;geQDx$YBGMet-Bz^&NPBFb&gynxHSGO$H*CU|IG|VSJ94NCM#q=8y9EO zYJP`F!oiqm==~e9TqcEJ&aQhb$E$4MpOw)P7-phy5nB?R8<)K@O%x{@G+U7mS?fMB zv*l?{QsXGxe}@0ONj(clx)=q7M_C9ik6Z#_G*7W2JS_y50U z@yIG2qQh&A**}iCvUt5|o zyUT;3!uDS>#tprLkUD4c2OblUU;~U;w8`{d*#5ylCPC$K zPZUDr6G>7Hl` z(1|3Aw0w7{FCzZ(Mi4#KV?Rw8$siDEwy=2BQl8`u^< zihMOUcH*Qi_hWp!uBN2&#@jCPPXmetXF{~|L*8wqlr0UTZDZl!Z0&C;o*SKd#M>Ur zI}F5C_W+4(!&=FFourc#F74y(t3}D(^;)QK!ocHhQMD#(vit#vgHajJIsQe?qiU|;|0P;cxnC7(> zzHq)XvhYtxBvk$DNSSJW6?&z0uJ^~7v>WOm4dShkBYMM!JLyXO9>~sYDsoy?noGzZ}*%HO!)2d6kJZop~5@64fIF<2?J3 z8HO(6(Mx1icFsTyO)x5j#U#n%7X8UY^vg>1IW%ypc?NaX^7{2>WLF29K`%X>O}B<1 z3cnNV_N(T@-#Ye#xypPj+`v)4AjbG*CGUUn5KCz-s#bm`)%6cXw^KKOdCC! 
z_We$0`X!VVz+M;4PgN%>-7W8O)#D!dlbZt7md*2lN~5gtLFqk0*_Ort=}O- z_nQK@rZ$I%!Lu?*ll3Z9%B3bbzp4M85v`ujX!J!#flkVG^(1utDRVZbHjSFG*qp@B z4c>|ZkZBmw(qO?bdao35G1h7fc*pKn<`*O085H_+N%@TTqT@`@QK$Alt+Tq8O4y37 z24;$!)?hvpSv1o;Nf3uv#W3MGF7b(O>F%_{#;7^bd_l?Ml~}H-Rs1_@FPXKzl8<*q(=k?=o~-!SQ}j z#>GlxgL?Ise4aqFHz@r-z0|u+u%4ohpYu+a;+13NckD03i7UwHpLFh;9$O6EG)R@C zTJb9FCrxPR__ND7+gs$r~$vIJ?w*HDNvN7JFF?kOJcz(q7nL;Ln-1@?T+)o zo1gqItET!Zo`WAedxy(n{xfv9xM;C?Yv?J!KOnbDgy&;`&z#g5AO zx&F8I56hi#sU)CvHDqU>a8)~kYu|)3YyR$(ATKEu*{@?$3F?~wn-}qT-YBf(>JtLE z5HiGn^fhbD_bzW44Oq-QwgJmgmJDGaBaX%iBn+f+bnY5BdZokdS5Yc6+mTy!dznsW zA!>PA>f$pT(f8dSaxrM5f|c&Qb)?sR+>a$I`>=5QDVgqUq@xa*3Dd3h=gYYgWdLMf z|5-y<>>|3T`_mfBFV^{-U*kPKbk2OAd&r{50LnF+)|lVf_mFgr3|UpUI5l>!BTfAE zjsD{ur%WSqsW|46-aZT&H?d#J)h9GQD?B#(jCE3bgt6#DZ3PLJ?qxn^mh{fj z^jB+RM=MZFn`c^TPxCn22ihj5GNkb&z2IGD!vX1?YN5(X=w10-Y}?^h1TN=X;CS)# zuRIMl_O#ed9Lp;j)MAkge|ksnzG`(eIx}^j#d6J(u<*4NVUL}778pc*<5kTK+3@kH zpD@j=AD9PF0cly8w?j%1KfVmM=iQX;VHIr13`O z&h-Y~<3HJKe{=^(uzys|z~i+~nTk&e1XQ`KWYLBUZPazZ3;Uw$@P>*Kd&`71HQX0WbBMZ46&L`cT^oY`1B`Ye@XHDFT4tC5Esm5$|k&y@n zFNc*;z9{GP2A#)P`($!X!|i~f2@#KS#xvl#78jyz(p-FNd2%F0d+1+2*ert-2mC97 zd2)`k-5sboTzlWvZr+=_RmGUJSpCLGQzr7l3e2H3ev+hF+xBdH{!hh_<65>muFk~R z=fr?Q@M}DY=(yhVKFTj7{Vx2l6&uk#IZk}`5E9l-uxjSN?(i|{m><_ zR)rXHd2|1kQ7-%A5tl%A%|!Eq9u2oXp#K8(Fss43sltLUm51XWMsNE*ZzbBUs8gjT9f^3t{dVwsnMtqOm(7hDZkO%4(^ zAUP5ab0z0cby<4u4>onG%S9Il{;l0HDmVH*j+H$=JMfK)xDpQQD@1Ks`f(GY)XTb)yjWzzHE7Gg5DAQ-N(5GoA zF971k&uS+-Q@v`4IyZLvl&Y55AotK~IQI)*_RYQ87~(NDbD)?HSSj#=A>@E7hCrZM z+tqXvb{Nf9t4JZ3pG3mCq(^CG?T%-9kB~G@tK_;xGMUX4ukAb+5q4iye%QdwOiiz4`ev>^jeM}sJ;T`1JZqPaqgJ} zy)9kRc+3`oFqe_c>e$wo!6GGLz&!gU{bYSRgaiJENqm$$BUklBvM?6t1MJ(YKFMtM zC&!FVu&+dyNePzIO|R(6Wj_}T1bVqZ`N+ecsiMc^j1{VYG?G$kB^3)ULi5>Z$A=?6 z!|GCR#&g77SKEpwgsu%-nD@SAUst-rcrb9gJ}{@}bbnC!OnF=E%uI7Sr@3iM0{(G+ z6-c667se53yv*(9UKpEidY3J--CmDg3|+>-+IH&=_@L(VPor0%DTwiE{6l{MDly@( zw-Y9GxlvnW{HeJn$?yFGQ42pk^L0f!Tzb7Sot=(P*dmd!GH+Z)+xXm6;GvCwo!^3} 
z-y&=Y8>=aEywx76r+o62;Nq`MYWg4K7J=!R5HXpd1N441q#lLtqPOW5<*qXAu^W0M zL*0^LGqjW~{XCCM&sOk{KbM2DWsNkyn$F!qU9uBG#Z(?KC=%Y$mn4Uo&TUe2-H!G@ z>%QPaUNUfG%~2KYLZ8%AthhA|L9{W?YPiEC5+4k;A-^?yZg2=`rOcpY;VuQ1Q3EY! zoxdi7FJEuP$d}?$QKVx7g3!dDJ=OG`d{LPAFlhTI*T#40c}*!6GS{Sz-Ld!MG-8?H zYVyvgN3Zy^l{;MWCb@|eDipAR#aC5&;f-6CGXw4$Ige)g^d5xOzpgVlzL>yExpcc} zf3RUcV)>?*M(#~(d=VLX^%eG`Dt3cn7nklT#b2i+(=8{Opx8F?@XG1_iomIKX44;% z?1%0w+Sta}&An5;tmebEeVM%T!&vlZsPM@Jc8&M<_M?W5v7=(M0I5sd5Y$?o`H2nxO$eQ40VtV=?D|Ufm9G z3*DFw6y2YeEgJ!~6zZ5fTu+DjupKDNg;Wu@DcmQt*M9?wbG%s-&V_tFa7vlu%lA<_ zdhhtSBjSHJD2Dg@XIWf|K@E*}AjGsM=ZP(4K;_?La>oo~vkmy+?HlXVEwK>pQv2Bu z>SUxx%7|70OZKC=G*CyYRW9PQ5!YB}^8+yJJ(<{AgJJ1(Qj;aSeQNK|vFI02`cc_1 zhuN{434iZ!GFI3a(72v(6f%U)c@fC|(R^=LEZ153;Si$42>8)+kv^LCr@x1`vf6Mw zv~&kEK`US11*_*5*?t>)mN&DD5?sBcTyyomtyN?31vB})%7R~6R+%G7P%qqJSbGx<# zX9Aq9{DaF$T8vV1TPCE#Ya8&#GhndPdpNWk$)Ow+i1xq4Z2Nmsw@9tNtt~XKAXdWu zi8x_^v!@z~7JM|FG-q1)2!JWmggv6-fcj3}+~+NWlM)2dh27=({G_BCk_N^50dlzqS#JY-Iu$N{tf|gzIO&Lhg1ncUsyY#4L@FM#I;U4kfIMq&GA-FJb zTL)ee0RB)9a&~vmD!PtvqRl8UJG`0vo*F9O=&5%`|9Ab(yib5F75#$-;eUw3abR8; z)rgT4p^M90i8*(v^}NNj=Fn56CXwy=|65z2+te3lqGmYdalpF`;UOC6Jo3}DDC zQb1iOmT+H#f^`~>sbwi)ndrBdz@k1=%O^_UEHNk{sHB> z&UBi5Q8SEgc#PQ1%k<1D>;ZT7ds1ML;4dNGt$uDdC+@qVu3cyt{@>Gf_Y3kq zv+ytJNNz~=&SvF=G#I7Gv}HfS4}he~^lf3Qy}u^hI6#<0E48-f345vh{I%vTWiHf3 z&=JnAJK5v=<~~fvkTAk_K`N3h_x|o5V`e2{Fn94CGV#vCvYhbIaEPDnY;(aXeq|+! 
z>r&L`?(vy}Kz!R@7Gg&y_q&Joh5>;1!{~Sa6ZjMZ=B?C?S+6O9yqpaOY|5*SMdzq+ zl5biXDXY`=8>C_0MNSn&hh(-UjpjEkw24e|H;xKth-RmTxewqrnNAF1A3wy8uyBCc zM9CG)=JSp=9%?JlpE4FRl#`lsP|NeJ9Me%UWXss%UZd6GqoL;QlnLH}L`OWM&2Mrr zRBpFTW6@Vv^}Xp2q|Is#Y~-{bbvF2R>*i?>NDnJRF-wd!8VLokq>Y9bbTOvS**nf~ zwgN9_WID%1_?!-v-Ua%qXR0R%T24Kr(i>L6cVB4rD35rR{y}l;Z|mHolwzD|kEvV2 zf&@*`#)dahl>Z@0&>_Zum?*6j~_Ixz`wtrpg{_^5s*4RES0Tk z^lQ19p~LgQ-|vC1*Z%!--W;S!-ECRYh#7f&({ipJ7I9^=z$78g!P+W`4^qUdzs7-X zg~|PsGT_DxqiUm;;n0@J8|?B&wi2KTW*nUrBLwI3ihVg#1p#~KO^20jDYnR0EwqqF z;R{^_>hifaXHT0A@9r(#&c82(_q|#o+;-D;rGH{oAhOt&^NVfxOjGlkyiDyPWo{zD z)nCimkZsy9qtyvA>V_nIxUPxQtcsr)Lqo$t)X_{{RDCvQc9)09SHj9U+U>xZyAA&O z6d?7(9E7pjA}znPAjcqr+|dg4Q$GMNz7eyho9KXmf0jOd#nVLR>nTJrSGB` z85EtpSj;6=HNvUXNO>LNgL9UXWp(JQ{Lhv*`I?ODuD6ZhX?P_pTH6|{`5Bqr<$B@Dz@yAK;+b6 z^f2@O&|O10z%rGLQ0?NiCQdpY3aeo0l zDWaE4-G?-SI)&r^p30Eu|D{GxgNY6SJVK*J-KFS#eH}U>dxPe-&{`{cNZE5q)f+J@$-Hh>3)iLRkA_y+Mi%#@1lu9TnSR_U^rQE5rWvD57hbo`&u#9Qi%`>+dD9SyJrt3>vi)oWC35Ff zZ|zO?7#n`sM`60W-{qKZ&uZ|BS-MLe5Dnyb1OHF^DhA4-{BY^K`w9K2z@UpmvQ7sY zh33*Mw41X+)#^4F%cP`COh8#{_aE*j-iVV6zENYlWM`LYlI-$la#Pf2^iX!=t15|z z!3k<%%%JjAQ`?|(k5~DPBHH)<2ctT~Ahz`UVPJ4`@Is+#3a&COH5>VD&zT!Evsl!z z#`^;>?{Mf5%JDERWqq<-v~DoB41H&o-r_=31A58P(Fd8(y@<&+y1EUMn&tb&`2No3 z=}J4!k7!pr_xw2H)YKzenpQABnUbfo$Qes37q6JWyW+EJC@QZQzUVw_4?FsjvMvi9 zO^lR9feVnBEYvH%<&QpxrM4k|ncdgGLSgDJXL&V_Lv2)z1C$U%@fR>f3EqO@)dK#qvr9d zzxMQYdvAFEzXbVd@b_?u>mIQpRGsnO0s;chvj~B?U7D~xbPF(fJZhS{S;U3(05qm% zzYP%lLX9MG$@KC|zu1jVoZx2%O225%#ns(wf{?d5j)f(ve8{h-W&IUt6EN{Fqd;gD zQuuu}Bx(65amU?4O8keI==teGVUuo3Oo$t7yE&0RHopeFcjtrgQ$HyKJ5^#49qyx) zw#sWq7;d0WDX1(@sfL2GC(~B)mBi4sR9CB3pqUJ~Y@3>@?~wM1g;9R&xMlIuNKP2$ zB1c)lThi~0Jls;5}w6CGrOfCRZ|fiqx6S7*IAlyugC;-Ky~nD%hne9ZB9I+)8gHHCbT9Cm5zG z7t6SvK)V!TEq1 zh~F4+uEc`L-AJA5gMK9PT3G_DBz`%7N`}0nQv8u+q+5PF&yY!ksSCsTB_{d#9dxKh zDU;W!6w&$X4%9@NAFbsGS)wEZ;_hEPPM|uP_n?9QzeJ0Ity$fv7(_&T0@%_5nFh>i zT;D^JzEHlcNvQ)9CZ9(W4ILs6A8M~Ak`qUywf$1}L+X#AW1gL(UePa4>};+Uyh5HW zbXW@H@upF9h)fFNFj(bGM92oOHsIl__F2OO72RR9_X 
zNyE+2!#+E;T~4HRn#J7NB%+IwurUr%J)s^9D#B_0=u{SEX{2LtvY<2ZV{rmC%;d-` z15tKgBh<_%5kJdE*%yXcil#qiSN^OJ+>rF);f-ZWYyC=`>u$xoqd#}x;=YfY3p#M= z-bn)bNH8qUm=*r(t10<$XMlJgUu(P2vfE;P_ox(ASi?Exc-(845`8&SEEzu9chYS=XCulMBh!1wx2rir9Wo8G&h0^+)0X?0|3XJo*|&I~ zwNtR(=#C?R@ElG`s2MHDWf)3L!qrK17`u$*axB)UC#aoMs0D_@6#@0TEQdD#8@w<| z{L`a~itF4hyt5E{T?m8Kw}#@*k6+?VZ_Y}Ogw4M%B8b5Rxy;9^A$dPEA5dTRR7@xm zFVH~XevEMCA#zbJTb}LaFXRMR)sunFgR65>!k$PoDO1}*^*p@^7VfMyN2facdBQ_V z)UR>rzK<_~|9`F|k z$>{ zKt?nk3Z-Tt_Z^?6SUsMlzx^TT7JoH=GWu|jhXu|Q-q&ekT#X5GC)K4vYSa6JKb=gB zuD%7AQ$!Y^=&<8|l>)I68kyal)O-sQBSogGw)A?UOl;bhVGRjfjiE;!3#wf=0ONsX zK`@cx%bpR1)*6!Bo%T%QaRX~lIpoUEoG^43^brE`;!QL$DV3ZIOcWWq?srkE8sFEW+M+m>$5$pi}$0MoM-p>oqu=OwIIAf{?5}K_n8Na zoyG$dA%Q{XYPAfaCb^KlWQ(X=J|nzAT&4uggN{+7QD-jZjy-SU`x(d(!kz0a$QQokK0?mZ(96yP?as@L_O+po9W>*sI%%WGg z?-)MlEL+;|>U>0JSo+O&?t7ry0eYtC>3xS>Us1V$Mz82Ua)SLS_}YbXos~s7lA)=M zFhO1SR=tO`*8>$ie79lR^b60gq|S|UvvwJ)RZy=FamFvMJ;*dD+bTK>!~Y8d!P02c z#B<~fI?eTBN8NOSU7=s3!-s`O`YkYFj4H?9Y~3U?KYvI+1vX5z0b6OVV#k-uo2-6D zc3u{~f;CFI(7;=}?VPloVHEncrK=N9BNq8AS}CQ}mI40B^dcNl=u+Nt*6WX*d8YO( z9NzuFT<;*v6!|$4&D6#$ru92w{pCUwGE+6_G+N5*fE}Y}$Die~|FCo9aWpVF!#OVW z$qu4Lw0z>xN~3|HXoyN_g{pkY&YadcZ%HVKlenU}zEHApY^h=4G(O_`cX_q#Z)VO~ zF1iToV*A$p4`4X6@QcH=Rbzm&$1>QAuw#sjH3NPSL4x+bpIf8)bwc@08=pe`$si0FuUslF9qq^Co`Wv%7{cw07n;7iKCGt0%eO=JB$HC#%xU6JW% zJJx9ukeMhF_QAkyua>I5vP^Jm%`}7d#!~E-JYy{)g-r81=3SSZ8(|jKC;sL511;P> zPMe9s@;@50E_uuN^0kr#yC59yU$u6Z!wmMHh1}h1TdCYtSJMbNaCQT2cZ+&_Pqe!p zIu>4x47J`=6z_PEZD!Fn*zReN0}FGAHl+S9<--r3DYOM=?&;$B6|D2O4Vj7my~BRo zp6HO$#?HTxhv8#@mtcaj`nWV~duikPs0=E32br3Zp;y^H&sw{a7@j7Ih~fQx4Jm?} z1VtxXiX|JQM{*pgqzLUhcqogGfb0z9BUuzoWaCk`5l|(r+j(h>$=%d=UPt8&-@J(! 
z9-mx>_Ym|UcuJOUX8gsY-Ydh&PHZ~Q$uKmFaerL#L5##s4efOcuL-@3y7e7oWE?d< zdiGU{yZ4x~NvuM?p)G$%Htf5D?ntqx0M43<*NjxPUR;`A5<45)g$n4eMpWdx&n7Zc za{Gs6_sx5gQb$^=>^7s2iDbhyDP}jVd#Cyf?q?3xadmFF)T;5FoW~SOhMC83%(d_k zn;e}dRf@9XRs2!)79Go#v4zL@(sQ0^s;5y)jZtejGW834rb}?IRJhrJ3d8T72qkkN zkwMMUC+V$C78+s3^mrv7&|!aN1dG6d;&flkYAENUUunoC(b$=Y)Z>5V>V-si1YS<8 zBGoo8slaKSV`ZMsj#=t2;MB&VnK-~6d{cPoPV3|DgG#8=)SEFBSb@@QdW8#XHb7SD zK<=}PPPKMR2O9@bTu-dB0#xBX;pf#b0BZ@G2f;MS>gJ>)a1QliFys?(!IpH22KI2dq``=yH@AL(9TZ zRy{WfnI@y&hf~|`f>WE$Qn=Mc>_=U>Rt6@T(F^mPmoprvSzvgMpDZD09if~9VH1gv z=zP$zWz>O-n?L5S`x7$hMz`=omdhpM*-WD}W0i1J=Y{3tfi+)1f%M?AbS?Uy-sxu z*c*`5DYD(mY<7W_lbK7jw(j{Mo1$0iYW!rM{GZ?3dIM==&36NH-X^v-zHnYB#XaXk zky6xz%VE~+4qAA4>CKjPrd&4VaZW1>;#UrSLB4@?gK^HnvW+-6Zng^_q0vZYJ}hB@5mT5iGWlnnZ2g~We7yv>4e_#1_XnA0 zzZGso8jCt5R$MA-nGn8>6k3Omx*cH=)geE}7T!71M?V1*?HB7OGLu~b0A5W(1)Uh> zdsCzAo5SO{G@wh=T@8zc7qdzl`Z$#*9Id{JG{q zKn;ro%p_fWRuA}o&LI?CoR+SJ4sh?YzXOty4UrZ~jt)t(+;tt`G!oX$MMddVs($;N z4(7;l-Azq-+1~)_=u0e#_+$HW*!0ems+*8^BR+)dwDC=GYAlY<>yU|nDCJ=0@||mR zI?(AP=@c*0#<^xtymrA+F*3wqCFWS)17Nss^l=I1+0T__LF%nF@d5I7@2P74G-@<1O;2eV|+TXOCz1jB3i|JVdzj=-s+yILgIqxNs*4#&ZNB zdXe~)(=6F1=h)T^5}jD_nIW``ReF6_*-b|x%v6B?YHFi~Mr?lR!%FEZ*`8093)8jp_y?8PdYRaFcLJvm$ zk^)(4oh{%wQ(e%2tcQ<-^8QWyqmcUF{WpJu6qIAjK%H4sSq-{JI@aHIZ(cA-!l9f{WS35Cq$iNz`7Na`)po)Yw6$cLv>kr}hxRv%l+Tm! 
zwXsi%45N&4GG5mgKL7TR2Fc9ommkN}@p={1n`TEHF~d1ZF}f*ShPR(q+%aPGa=sIK zD+gVG=%OzbXh3vn>7QlFJ5Wz=uC~uF+tbsI)9K9ff1m){>gWR+Fuv@m_TipTS17j& zFy_90S2r@Z9A_BZ6R&t!$-2=g^yw3t*2z$ELard0#JcwHY|J^6_OLG^Fn8$a6w|G7Htao#iS#|Lu`RaYx%Voa9!MCE}+V2X1d*fx(1 zpBku+E5QM*zZ)W4eN$nqxRJ?!vh2AVl!KG(dt>G2g~7v`@;u@1ANj8R+1j-KdG)ob zEb7Sq6ZKZUl}5MieEU|(n^nxj@w-fZ!aSsK;=9r6v4;!+@!K~d+4J+ksJqpouCEX~ zR=@r&j00WeV0C8t3zlpe%wX5D4{COlHUBFjv&=>>$3xtd+S{=CLeVzKHu)@GC8h50 zx5oU6iLPV5uARZfu9$uv&glI09GH`MNbguY>eWi)Q{ru!7w=EZd3(yC&u#Q+)Owpo zA`Fnbu6T#GB&{>w2$cGM_Elm($q<6Jh{xDT=<)6JN>h`yZJMu3pOgor&p{VF3Fu3p>x(I|2L$(@ zI#~38X|2TVFc23<8SDrDSidP0^0lXoQAeDpTxy?0+WL3%cP1H2jXZ;zWziF$FERe4 z*KVPGkisqIPNZkd)2*UXr+j*E_89u3eW=Mtosyj-%!_i*oY4bG2V?B_@6=q?Ha|9i z5TU@~|N6cPv3LZ0q2!>QGCOwAwj(|=6z;MgFj3e^izJ^2s zySjU*bBsc3W9rX%gyS5`p6_R|@ADjE#oj zfJjH9W`48wvB^K#5MyRD)TeymGsC?m>3~K)SwuNWs;S(^M7?9CAHE`wCNiFwuNf$T z`>>X>f}>FRj}{+k+cYM+oeI4Pg|pdwB7tFHO2!o9mFas7iY*+|y?|DQ!kN^fxk!OE zAoJNKiK)#N|I`3Kk4&iH<3m=urUKi+2jcp?4lxlR>uWwmn z`4(ZcE1Lf*)5y4IJWI9(Q3&{7p#k&Pk(O?F1V_cU9;z2=)WvdS#W9vXA!Jm9lQQmg?qiDI_KZgV zHQU_Pi=^rg`c`;5wCsQ|6#`x;N6DHkHWbv7}RkT$=NJY0u1SC zd7H-Fo3&KzrnsiWyYq|?aJ~P(Rwj^LeL7A<7&taLZ={o8)Fe9EB3#JN-og&vC zO>Poa*0`?gxZs#d!aL#inN9=~#>;h${YFI}$mC+Y53NKiu!)X??Gk%a{YK%+gqYOc zQ76x_1<0cwiJ9sTm^kgUams1`IlCincqskEvo!BxQ82NVY~mrO-P|{yE)-1vW`>iQ zm{Xf~Vl!_!3G2s|FiS?QU-mi%&$pg!5vS++>^Z1`BY&^;NX8X)L>pW1x_O>O0e-@( zLj-`H@L=PX?da@D@t=~#MheZe*Y~^RAIZ;0hqhpd|2w;;AW0@{8t)Wmsg(@ijD2VC zTh=wLrjS1iCilN^c)Tpa6Pme23H$Z9NJ3Z^5D%>=aS>B1K7_LJDW!0cQrQFp`yd(& z*KIfF3uILcMmxw#osx)EivWX|RI>EAyn`J`<48b@OJV@-`FQAaA;{UEgjafLjTIHC z%LWZxI_CCxp3on|qN=;G95f|w zZu?E_(?z~QymRds&OADwuK$0yddsjV!?j_Tu2GtyLAo3166q48hVJg}QbxL^OF+82 zL!`T;yAh;2_rqH6yT5Pm|8vZr>$&TS^K@xZ%76j0B7A#OjTq7INc6FieFh6ZU45Hk;EmcSH)1+yLH3(+N|n40bEASaBvMWn_lx& zM@Sx5iiN2s;AZc)Z$=W`u3KjtH>6p>;Bg$F*b8ODgfw>ByOozf%)5rwV;1wSU!;Eo zH<%9E{`y>qUgg`oH=H}Z2_K>2@9jk;cdN6*^6E$i_;yh&87L9s5*@kp9>D2~O{=*r z{S?bb4}kuD0bc8E5C8l-kN#-VCIZH{+knyfd?^42&}8%jp4KX(-$-Tut1^B4r&l`Z 
z{Q4|lSGfCqGtZIByJ{6nd}X6Oz^F8nNlBu>vT3VVy#peW`>;P z)uC87xqCG)+C)b%O?iIi z)34{AonwtYvDy1QUy?eRcmB~*`#q|)8cu(43!js*y_5d%&{9t zOGU-x`X~F{84dO%B?nTD+4?A2G-G<)($<|clwTftE>jNW?fx?hSq6v`7?UNHpEy&d zTewC3YNI81fA5|d%jYOmC$ufq0b3wFZ1wHFI}&WgAg_ENVCk?6qFeLl9C7pY-P|gA zQnzb6Ut+qop4LpMGKNF*Rem?(?UXd;{3PIbPJQ}x12}Jg%9&{|NYQHCvL$C?##Oz~ zgDz0&BPUh&>kl$Tb3)cOlPszt z2KVn9t;F8xGW&|ge65#dHnxb8`GAFlTSOJUNVQ3RE+5Y1Z*{aE{5Tp~i?zW(Eup;Z zIgep6P0a-t#SoUb(^gBR1I7xsb?qu3jjU74Nr~SfQOWL}!t&fe1E---P=y+!Aj0+5 z-#i`K<%=*un_G7`&!dKK>9)zo3-Kgn=?5xhyMx8e0O~w}fGfRrqkRcN>AVho+k**{rJ8JYHgf=2an0{Wul5x}nUV{@6W`ib7=)DS2^y;xFP~N> zAxWk!^j2-~OSqOO@Rn&bR}+UMmIXZ(fGK$QAJd6(aVEGz&Fg249DXA^t?VW<%L~18J z2iHRjFA`Vy)c^bk@!Tr_$hywR`5H~Jq5;*L-|(K%O}N*5r!#DHIV2b~{LC&6RYad9 z)eE_psrYGnvyVMrpNDsN{ceZQ|&=s86Ym=)I5IkR1?a>cz}b%^#_i%{Vhe;Nu>xp-ea zk^j>##txiwDXR}!S#dANn`Yv=tdndCqwc$W5Kp)LZACG576vR9mh{d+yO^`?MPR5T zz1aomSchHnvj71yYGdN?oz-?l`E5|lZE9VkNIpv&{oeK@tNT~@=&SCv6zLt^W(mHA zGJB(V6#nR<6N#VQc#PcSVgFws%PW(MqLUUi0ig7n{GBP@U#O+~`=>Mn@FC|J{`z9M z)KDIl#`o%sm-zI5Y2}kTG5&vA`C5Agfc&@oR*APF;skl!k^z6$v@yaz+sdYq@X9@O ze`a!WV44L_+szZ3r9}iv!dA%jg*w+Ye8L#YpwsV;Y_y62yFWi}_F0^7UQc2yEo?nh zRkO)O`_kd%jd|xJtTZokgtzH$FRD*;H&=5(yQ4wYe{3!2zC%M1lRdV)5qT@r0D91` zr$;PjeB*8`#X?b(u5aP27w!)sB(Ef;Cgw3pX4mtD-NZcrm=-miHCqYD;70hcJCE=1 zc0`$COsNu}>A%VK@~C7&*k_IKiyBwq04Nbzz@xi&qDN5sYnp9NV#0Ce;iDDHBWvzu zJKQzO-+W0Ji$l}3>Bc3(3;i~ov#$FDfb&DN1Rw^H2KP!Z4dB>!VuwvW0(`~=vcqb! 
z#4QGvFZ*U#j0#_uw=ATy380(&TnuUsop#y7P2$f=L*#P0R!i8(7sAjJXKl;Rf0SP2 zrkB8$3T%oDdlcnIF6aw;C6r_aaelk=d~wmOpG$C0FUH+fL9$NnPO6i4$hlYJ} z!a+NxDd1#F&~7#Nk8mV)8ACeY0ZTK6Jo-Q34)}{(UC*L>tHJXf;M-`~E-4mR zgr&Z8%45CiiR4zx)i+jz;Z)SUE{K+6iLjB?)PvlDJBN9JDwfW`FuYNZ0KHlr2l6_ zN97&Q{tYx~@!hGwG$eT##aaA3(UgM}=tB_)QhTd6(?3Oz#>Ph{qa^AIfBs<}GD09+ z1=ld*iTpt{AUvnGC6&?}JT3T2BiO{9$9OitD7*DT(infkzLl$1Mu$#mqetnuINuxU z5SD%L9hc>>@Rzjku*sV4r@itf8J#dYAw1~7(Dir$CM2L*edkyLEKvoZdz6r(_+8*t*P3-!t#bbYWmGdZ_}Uz{QDz37$7b-uau-gvTG+K(k{oBa|+zoMe70QkhEJdf)K7S6$vV2Rrey(kF`U z$#(9G!>Rk;W)a(%_L%$il8=R{^K0g=ePuXGkJy(@W0u??#@?kBI6>_dS{z9mQXm-- zc%Kx};ou?-fgwC@fd-=t9e_mj1?upR0pgDn_pI&IT5i9};~uWzjK#dF$8{}Jtw%|<3op{sGZ3qik3@M;0buq=UcAai3~F~unviz zbp!Q(%TaZr#GOtFD}IFG=RMfb8lV!7=oB`In6EcMo?4s>JMb~;DMV4jWwr!qtRhct z&m^is{2lpMA&*E|0WWusCq`)(Q~BMOXw{2&3M$`kKYRgP0D$g*OC_G)W%q~Gbb(RN zP>Fi!$RpBQBEyYt=zp|jeklL%ptwmm0Nxg;l7dS9=@&)s3b$s(Q6z*cieLN%TSw$6 z*LP$J>gU8klY7oKZb^K%v^r!dxhoTJO%`{0OHiL2&40x-oy~RfFWbtGT=yv482bTj z{vSIJf_^}`98afVeAl`V1F$5<>lcvR?H?m z!D1^E;pE!F2%9jT4rI9d_krP)F!k8eM+VGu8QLGNR_pb1M@48$V*T%Nna&&ScakJ6 z*<0Rlx+U!iwP_5R^!dzoK1yFt^i(Sq<}qVLEJYG}XT>W@zNa0!*9HaL z3n^R?iyM2xDQf0JeT?@4W|Ds^C?~e(+hUS|ne#qR-lTtR9>%>(vaNQwRKI(^Nf-hU z{Wg(3u4?CX7WSm5XbENZ1Q1*hnTh}hj9Va<{Xg@uMWE~JW+j+K!r zLuHgG)_)qL|E^tEK`$73it%%tBYOAYkzfEkB?#|s32)ESIzQ?77G9n1yQCa{wt_0C z_C%-?{+~J7xO?ncrj&d8s1B6*@w^Ti#ojV9u9c`gu^z5pzm#)^xFrP8+X^>@P<0+#AC={_rRt*`rViA)T(-Ws|*q@ zk~t6CE89!-3{ec*g)hV8Z4rKEpiL$xB%27TD+s<|@9+|;sQ`+q7Nox$UGZz_L+S@o zt;ca%m>be8tT}k5?VwgSZJ(;a)72T5EM}KZsP!iF3nys|=_&3z;^JlGtZS?7{+jt*nOC+pz+}`{Xr(I1&sBbb9FJ{4H z7(p}vMA7cOXcrzsa+blpR&A>Jj#1H-%FAPzn=RDxWkH6j^6^@^tghJl<#;51EpkVT z#dZ|80q^SHH`un_wq%omhJ+>d^P9M$Q!PX;7{*@IJO(9+o#Mu9PB)f3{1_fL*0Z;J zCxrbx#>1x*;t^nrEiwioW~7~KY*gF=Na_xmKMc#w4B^Ys=JHOra450Z3IP- zB^)y{@3VkKh0o^6pU`PrVR6L#9oCUGFU|r0LU>Df!OAMnoV0&cCE*8@Cey>1=zlerF6{ zuQUg)hiuHgZ}On;>6+pSbZ+A$O`MgqYR`QIL5VvVjTPpN>{%3>af)#615V06gUPlp z+?mW*`#mT$SamxM5rzS|Zg@$y=6e;~aNJi$_4&g6c%*W(r|`XA|0WR-9l&hChsL62 
z>BMs!mrISgtx0XPb;!+*$K=uf^%;rLbI(14{6^qQnpzh!9%o)HA9p^Hj;B-)s!Okn zdqb>FSs>pVvz`qr)n1^{2J7wJlY-9He#2~gvoB%AY)Q18mg+_xoa{tRJoJe4sIm0L z>g}Od?NHENmK{s9+(*%oVxU>*#TL$SO&nQ{q_|g$<$EPl|H@LrJ$O+T2WT;4dWkOc z;Wh4^X1v}lNo=ZdMty!ZqP=e8!2B@EXmkgPp;W%65(35A9AO7pRu^SNSK=^)G3Np( zifcFD zM`C>Gy;!D>-(x7|yD#=x{Fi*uSgkC7+-S6U#1&;7P#@oPIUIx1>`2x~f0P^kn8~R~ z`TV@ZI*{BcyixxcJu>Tu7Z~lQH$d~y`{5mx0JuSRyg%-U7-HE!AcqPvkdiE`0$V&k z7R9vU!r}t%a`u&oD+X?*{<+X__-qalo1B*(++=csKti zM%#l~0)xcCNY?kCf4JgcAyX>Ugnvzd+PSvs3{V*7SD}1c+&rpUzbzyIySM1^@zUIX z1?%f|5cFGXc6A@gPE@Ksmz(~^H}iutX@=?B4UtI1>RU3& zOmBvbBpbQ@r6qc`$L*G7Fq6{HVE016kF{b`?R;0m0_k`mvz$g6NS^~UZ=fWc0_JHn zuzQxV_B=rS=@>RM@^k{I30j`o5`NlVO4$AQzc#(>BbBjoUaF3WRq0*a>!|WrN_0|~ z#a*37zwkXTw4$sw^r9ruZylJ2^hN}2{H6#n4P;cem)_2`Zr#2y$z2+++xJ)Sx9Jvk z;AATg{Uvnnqb7Mgc#Oxa;5pgdI4(i)whY4Y+EFrarY8~oDuoAJ(4Ye=;3LAqLk7mY zojnFBWx8oOUxij%_!xge(Auopawxa(4#~999-tIfp1(V! zaEd2Sd@7pf5ni=Yhbdhe zv^cy#Ib-jyJ}UT5zoEnYuciQ2OKQ(8Np1VB>N#_DFCVdKl~Gru^+L6sA+qkHITA2l zH{lX5{vob^a`#Sw!)?+l8q6H{7zZa6T?d>CLU2E2u#Wj#6P&2;5f_->_zj8&X@KJ+ z=C@o<{Dg{kR!1lv^m2lp9xgf_?H}V;y6>e~j?P1I7yrTeG)qa9Z4Vhpis!jU4*VaUrKTz<0LWW+F{!h?O>U9ZJR>mVuy1v}&X7=1+ z_wUMb2=IAHDmWq>03C9_DCX4NH+rOjmQ;yC^I-9vZOj1X%H3{A^gJstfP^ccR^WC3 z^DxStS#G*{S^X6YIh16z7$(V&AJ&4W@^{olj?`>{@&>W0JBKmp$jdYG*82L4$5_Bdaf zqs$gZV}=s-f&2V&N5zG-27uwC34`n@9w%pui0W8E7JiDECms8t&+Blk$M}! zr4KrU0OkJvb30bhM>z=n^!{4z1BBm#gTp^tS9*5R(ID%uWvAg}>O_8x#+qytH(S0{2F2SL!>hhDiE&k6sTOcdmPRU^&vnC+MVCMW&y z#;VxLcD=mrI=0zT_My*Hs)gqoUacT}L}XENGaCy zcbdTYPnIOXRk*h~{)BC5!vFAIH4~1HN0J0xN+D&c2hK?#3!hD#jPIeGEt3(ff=?e9 z%MH{#y*_6s8X3j#aT9ZDK^VFXn~xq!v|#7!XfQ)w-t;v8+Z-`C_4~P#6q<|?GHfOmcjXce?=;|z&!f=Z3qlg-0oA}cqRtTx~ zZl|)ajeq*!)km-FJWxAgk@mG29PgD#4B6LMJ>Kn~-HGRFZG%Ct1-;1{(}->Hb}QCtGKgqEKO`XvH9&>w(EF(2`ygdH}Ye2zfcH zZlw9`!yb=TT?? 
z>UOA{m=^zT30T+Jjy%p~xZ3(SOgX0)zj||~YQTZH#SK0jw;EBN9{FAg2sq!pAn0Vy ztbx5ER<}^w9idCs;>`_7PyU(UPS)V$tT?{Y--YT*eLE~x7Uu0l3&uvFao>WAVOI{X zXX`@?1sSGs8Q292DUVd7>%#{@-e(I_^&A>mk~M^=fSs2ju69(v^(rB?KlGY|>>_#IG7M#bC3kpR(p6 zs=b!4Pi&K)ySsX>94gh=@5#Qy`2Y;lNU9l4w~gtx%>fso#1{9%b)kQ~IR~aXWLPW; zsHOUA-GSD{XD)X~^K*rytjN#=R&3O{B&1EiDBNFCU%M~Y#U9Elg=`2o;2Qt_`8a^174^G2u0Q5ywZhD1 z`y-erBJWuU8pmgKAVE^SVmn!Iz3!Z_)WQxZup8{rUi-Q7*t&BgEZE!9g96sAp*4_{ zQE^EpM^*Pm)irTJ%jkq8sWH`2CzVnyBMImzx>5jvp>?>(m<1q{aMJJPH(PxX#$kx& zAhj(DP5dNLb`*o0n{DjT_vT^L=lbYHpjm~Ru3NnDE)(vWfLTGWO*4*3ie{lG*Z8R? z zeBZE`qB-26ijy%201EWZajZd1a99G6vadEDC8Co1k6?}^G?TkZwNNS5Z9H4l0qrk; z_InoY*HoWM(Q@uroR4Z{5>d$cZq)$3NZCkEGkvIm-BW{vV79R8VNP}PNQg=t8m&0$ z?VdjEP-5CY221MbrGWr#igPqpeCKa>&}YLobo0K7lyUc(4qP}^{K<&J&%8qzFQ~`Y z8;zmEN%(hM11TM~nVe1)Fsk=7+i0mWy$~i&!*}(uDP_5PawZ8&^0$7CvN}n%E@ju$ zO${l(KL&E}VtgUk5m+985FWuYQJg3-2~nky2hu;WsF2 zWob4*u9b4+sb9wX5Ow>Mpn8njp5v;fof|5k5MiLTgDAxWqX;s|%`Vw2xIOprwOukd z1dy+#$yr7?U?uK1OtrZkV2fWL^&PN@^tD(+6-Q-;OWYa3OK=w0abn0KwS0oiMFAgz z<^J4*ABX9F&9Mmuv((G2KhmilQM$XYgj2rg@`ViRe64yOhKaN~5r5l2kGFT{?!h2H z?JCiu5>V8Lg5vZeC9T_Do#bn9Z{vvcUJ9zKc-vZrzyTj~iJ8;CYHoL(h4HL}e*+$7 zcpthqmtXkYq6a;=Y7(@f8zV^$Z8j%HD;(zPjutRORZHPaIwfl?e8Y(&!0WF0Q@0;7 z%FmtKup~87O2@sETHRfkXRNlp(=Ka#HiSYf+IF4CRuUx3 zj>3?~s7`hi(*tD_j$o-iuIPAh?sc?M-_Rb?Lf);42fuJp-cNIW!CS5F&p&G$avS?Y zC+vMa5_2Mu*dGUWbIOt9w;fF&T-(Dw)9)6a{@cLLzzheqi_HG+!<76utTsMHX)K^o zb1F#&IwX&?kd=7h$AT#v-uRKyzoR8YKR%}<3L@rO+!`&zj_dPIc;~eXA$Av^H8Guj z;Ex))j+kE`4x4pG&rVvSipIFe_L;p9{FRK3xZPN6^%jH^0lAfpwt+q>RZ$T{962;H zUq7ScF#)TT25F!&dn zUituVwnfQsVlo}_+Z%nxBOkD{f$keLRW!nUpWBUq%q5wT<`epYWKr519SXtRIQ^__ zQ;(*#&7RvgXRC2O?fapRDQ7E|Bw8IXnwqS=R~T#kgYnQR4TRpAJ(f!*o6a|eraM+% z*W}4hqpZf6h_~Yr6PqA3kpKhdH#qziQIz8JQn#Cis|jH9?FM$GV8;L~5j&<(%Oc}c zSiL9tg?b}Ub`@1RmJ}Z)d$is~tFOC+6BgAuNg6EA$F;_+HB_$EaFkLhN{v+(OQWDi zrLymSORVEhvUjUXUq5&UgcJl3s%!#%K%b_&aA}PHXJKQ5l_bpWza12vNxl{0dFJ@( zE1dtIhG{QpNQEsV;$!=wv3>K=z8=eaxlpQ;nMiihi1$>k2=l3jUQS3vvvuqTh)xhR 
zk#DM3rAvrM9F|XBzIN|+8=OoGWt~E`hskB6_}elqoFtksa5?|1?hk)^EG?eLr?_-H9&+Q9EV;JN}{s3bQ+1f~7ks*i)wfiQFX&8iREH%di;a5<1V zuB37}{g)|R36%Da`|VUHa8p=t8;>_5N5kYit%I~;C@sk+{$6b>erBs9Z7#b!N99_$ zs@th}IJwyJfu&T9HH{U%*HdFVg~|PW)5KV}l$dPW20uZE=Z|-V6?om;Z6gS~;(zrL zeokB+)?9un7=J1YgzN!SJvs*58@o*i@(w`Qg0{4T%i zvc%^$ll1GVBQ{diIqJfNxQN!2u~|1l&TNdQwx0NXPUN1hb5!K-^Tb1)1jSu?3jTi6 zj&-4p?`57ohY}>ZmN{-*8O|qz!Xz(;eD1odL8JJ*OglU`XSGF)(iP;tPThWQJVALs z2c!uMbQ^1{HN|9~2|iZnEN>l0G}rb`KBLUi9p&+nbdCf;t*5n=VyMha>HwWKkZ(BV-sK=PP2z+b~aBoD@vZXftc+Xc}9F`w5=@tb7A3Yq2@-L#nS~32V z=wkte)UaZxrdY&{Lku~?9-~n?2wr;Cek;t4sBI}k2^U2Mk@I;B@E>4Hb#E52d}YEh ziLGL1kXmy_LIV*RYlzkEVkc@S>l>`jA`ziz5$a5JZ3-drpz&{#{DQtMv^O*xf??7` zJH#}`LLaVTpr#?e_Y3V?doT6Jp_K7|z88;|=2Dq~y;c#;pFMR|DPX+gEsIC=&O>lx z(~eo(!f%#13j3P#6YReHiIw5HqWSbIMG&a-_lZcm2XX-UP5(7b2f?2|Ul^|tZBf=k zr3@7eC9A09*fBqe$@BC@&b&W0QrG)L(}wp~(db`%tFylOqA`pQnzj=O1YIFMwxt#G zt2dGq{v0bEH6t}?I+rDH%nF?Gbtv}tWlUvpOo_mcOdw6i<`Qoum~9mbCLdMe6uKW} zSGe$K2Q>DSP2(XHn^LC14J1JGa$d(rw%Ha`KA~*O_C2$fa z^WSjeI;Thn3WtR=Rvi4y#f5z49a`(MseVIbL7}EM}`_BrihO=Bl;OIaZWYPIR zk{qBDU2CA!opYmYB44Zkp_m&^iB;k3NMqMf9DrddRj~f$RJ@Gn#iDi>z`bzt%LaGH zU769`Vs7Z^T!$=S%d;$&Z}!6mCEPSxdXloV9kK2>EhoEtpM8WilC%57>?%U80_HCp zMH-2)dX1C~hm=^#S)DuKge*2oJ-5I9ecA$C7I4xr*4>-ADd5EBhuz%2k0mnCPR>1K z#5xR(%`w^SOki+}4~GbJ$gWYcm&G1c&-B_+-ZV75?6EaC=rZyX2Mgo1X>VQCso*1P z*ic7|FMDYe;`>FPlDd+*9|*7*&v>!F67|Qk7Tv=CG-+~It#2-$Y8LTAC3Xh*_#Fg6 za@VJ78t^S0!7%(hRr!C#)h3uaiGzQzTPFRg9d;(VWvPV~UFgSCi%*JxvqT}1g;=Un z$lOr{HMEf^6nECXi@|&YD|EL=AVa;b98`W9C{BE9tRhu$%6Xd2!9JSq#W>YZ#DYvD z-8FT-je0s~;MsD7yj-G;R=TtL7gfv2QY+5Ja5PM){Ou*d!F-Lg&k4`bff};v5S_#z z568@3&lGzH2i3c_9T9m2#&GD;?q{OP-sR^;ankz}W7;nwar3}?u;V;lo+xpy z$B&x*mn_q%EW@=DD`RF`U6_99 z>tF48$hH=5JBuKR*BWmwXMeSh1){SElw~XbObx<7{82V$Y$)$d!7Q%Ofzw?rHZWXb zn^wi1;$G@2g`aVF=9-kd1ZV#o>9GDKP)1wNGU`sfL$E$JNUDBkd1Q>bAA?s5wc=c` zLC2S0iPq|OI9zVZkHDm)Y$EL{i)uDj(xoVA{Bg$8u$nABoIW|-&geq<pi{(BUV-)Y$(h}Ho+S{k<4am z2@lMdDk2ImUfk%UE5WTd&o%Frs0ZLFq%6i@4E*vz9|YhU&@ag`KAQbLPk82S2uA-r 
z&M1}!RlwtSojtC}27}^dS|A0<)nYaAVu3d{!&9{V%BC{W2363=4aOjTyp1hpH`6N7yc81e?FnG{4D^0e|yJ%1o)KB83lMaV34b z(4?R^zWPhB&wP;@v%H0n%9RqiW%;>15V&Yi%OibfE?#*?CvR~3Rt0rpaJ}IKR`^A^ zmC=UBtzKhOFUt$-{~iz6_0XTDr;ERecshv~$102ddzI)_=dJqo`@&FhIDTGv=?mGF zuu&X-#{v8^wis3ywxXCS60C}%HOhd4It+3LNsP~f`@3}q*i|gtg6GW1{_wtf*V76v z<)3heJInRkJ{Qo8BY8W+0LFjM^*vG=8ru^RW_B8iru*kk3^@%|u|G5L9Zj^={fA8r zlahmu6J6i?-*1;5DO92i$(-&pe#9^R4#!ai#;AMh& zQ0bx8L|ZW(=b#5l=)VB+o$uox7CFWTA_+uFX=W=Q#G97*b+#nF#1>7E7CD)`^Sh7z zF&p;GB9?OCt7Tg2q?nnT(zX0`t6ys^{;6aQB~IquT0RJ;_N13i1q-{RFE?*ZQp}2! z!YIe>dqrTKS^@s&o)$?G_(-;#9aefllFB>0{$MIa+TadXE_lCT;#(~s8&9*slZn)^ z#2`z-RrXIIublQNIJPeKx?*uctauk`XrTIV7EhekMO~U=|Kkv03e&i$ymq_g#>^KS zL7rGn8eoFDp`@Aq%}?M#Z>3V5?eO2jz6IFv?>xdr$n3-3I3*+&F%(Fe?T)3=-+9UQ zkg>4+o}5f{PJg&fZHLm7!8|DBCH%Pm5lJv1K-$4Tg5Lp#-ysI6U}5HI9RGts`vua} zk*_=LYN~Ons@B6>p07doe1sG(ye*tCi>xqt0N)uqeges%Hp$TTH;S^slEH!_+7cZM z*PlL%OYQq)DIgC{zqQq3wbRqe9PRRky&~Rr(N@zRw9}`x?0bq& zCgc@ZIG+7i%sD>MX6|(Rp|?pTCwK@+L$Tryp_a-**COHm%qJa3h{E|K8wGttDu0_Hd-c1%}C+<7f~1UjRijY za)}a28FMO;rXY9kmfjM#N*IhMwThAQb4v@j&hBp*3F$V8-&OOMY@KohP^gI_bOX-T zAK1Kl`smIt{Rl-BOR6k!!Sc( z%B@-MY~gCk_a%&TsXqglasQ>_!qd(6=j6eiQ>J|8S*~$oQf_UaWkjgF{xPd0tENUG z=?(ui-#{x}#>WFMm(o;Bi=Je~fZa?~^Wc|R#$-*7vCb-WSk={VGB@Wv^j{m{PrI}~ z-0a1%6&|koeY7S+x^D1XCSNP4V>#Gdx_G_bzF{h5VMsJnNXE>SX{?&!q-7kewba(> z^YMZ=0JB#a^u4g~<0shj2{ceL1Jc!Wo&|xA^PBGQE)hDXDKCwWGOxr3vEONZi|PKht#xE=q-o2zCVbGJKV6X!I$w*}^LTE! 
z%AI5TM84d+R*10`Q(3rGRv#PgWBkWe!>W^5zLlJ#HN$TNUh^(Se(jz%CP?Z$gP>4+ zPDoQ0W)KZk4pY`^+K;2@TsO{5Buo03EwAp=@L!|?{O@P!eacX6t*SqNU>>?W&oE;d z;Gc(5VIR2qe|t2)d4m@}uONdOMyV(0BA-3Zq}^7J-tE97v73i2F!3CpaS|6xD&4O^ zG`1Q0k_6-PdctDiq`qIqCfuZDQc)$48NUTqrST^Xh5x+279*^TjNX909v3gKw%ogv zS$|g0U!7FR_2S-PIvLP|O7wn&QG0G^F}`ZKhdTXRS_m&{3l$T_Q%#w=X|2$>EHebI z%rq5@$U)pHc*<;Z-{r-mLxuz%<~S%Q5TGMOJ?fB_C|w-P!CaqY1ZB=?{K3J|CZywm zy^;IhUEbKq$;({`1C?Xe>sypaX|uS73_U^bfmAl)yzSiXDVJtqXYxZN!@CR)IID;L zj}07z%8z-zKnp2--y7zTf25@w?oZa9+8xYf-aEgVFT=juV1bsgwxKfB%u8iBs%6DN zvx%2}e!4Sa=_TJ7|E9(TEy86S{Joy4Z-bY^9`s*dVc-DsoOS0L`JL?#FwIn=ew0zW zRwNDIDVH}&dQa$!s1T4k*M&wA7?OX&ZuI+W3dM(dEv={D#*2sy%|aN7UzU2Gjavji zukl&=tM6P%UJ}W!{yZr>q<+%sBTM*nTUdpKNAcyFdLp%BAQm=gG;Py;sbJoK555Bk zqx^2z->A3Wtr@8Ezh>lxXxLA8fBuLfzZQmoPvU6Y3C`uy>jR0v$<;oHI1VQchZi4lnt)zL8tF@kBy)uMQ*4<{mB+tL0f z1{qltAH=i_P4)KJU^@b9g{2CuNrobDlcv+_PJXV^)Oi){%FXSlU2AjeL^l;Hg@y|b zyHE3&@=`E>UhY)p^@-2b;;D@KrODcc+;E)P_{|KtuErR}dc1mzEE$1sS@>z{oQZzI zzm_l04s%La8cR-umsuY|Wv=?jQj}TA{KxSy_Ts(i9Y=D#&}{726#N?1lTz`nJygt?xDA zbCwX<10z`ZD!v7kzmn1016xWVjMm?eR)YfkMS-G%oRq#d3KH=Kw*bDpUs(riMXpihzCW zvZPGyMKf!}bE;Tazs{i&1r+oa-t-_IM;CZ$rw;wm#!mV%(;N1NM7{td&S^5gxtvxt zQ%(g_SWhccjj%nTE{{l#v!A{V4JdX9tiqh1GLCQ1;l{N&2x3hXKK&SN$wxou{aI#q zTnlj-71hXEOWG8=FtyK`s4hmhvTLbmL;~WS#sr%wL+aF$wtS_oX zp|Zeh!`HE|iXPSaQ~WuaY+#0lXi6@?P{&)gb=LAnZNDPDv7rhpd?(t*N`}TEAh>8V z=o3??m@8Ak{eHhR_(Z?6S6Rw-MsSp8ghg#}-%{BhH{_JM!2Ipf06bud{e`@iuqmSp zF#;_tJksy>9!~TL;b`@3hH%7EX+Tc}QodS2rO3JM-(fCeqODxJI&~UoB=Fe6i@iv+ zq%i?fRgqti8i1PtSM=A56rv9`#@Y~qN#e9+0$JW8t)EqT%dO)W|K6;O<)0 zSBU5%H@G*JR|FMkxk_Fh!Py?bflX!KDPz$LI6nOztjW~<~x(R+AvPebp7}nR<)Lf0c)pC;kY{2m23bPMFhK0 z)0~kXR@zY4gn$QC$TiW624NYt4L6EIq!QUDv_r92`0vt*W!V00GElR!ma~W>5YUgx zZBC+!uOS%a44v&e_j_~RAwsSg*w)(ugHCFRQ;yAk1l_ff#NvI~LVz2?MlG`VLL0ZI{4+o3`6*&+y%H42r0I(N<#gzGNEZ}@!nYmcgf%a$_d*WZk5aF zK(>B@v3&PpJh&$KSXCLO-W+sQYWO3Eq*2=>RcD(&KVeVOLx>iAa7pPt1~aAaJQAW4 z#t?bHO0?NlAQfBIR)VnH8PQr8y`KA`am3$%>QDEy?y*=<^DkhK+=EQgw|W5BPKVR6 
z{?EBcnbtD@8scjrutLRl*zEglC~IH}i~^!9jzWrIh=ip6IpPj3e@VzTdLGs{DhfTE*Fy`)G+ng}?E*uNh%+rj6I0BOS9 zUr@yXON&vZIM*iyLs^X?jdFDGYQJ)PgW4O7F!sU4Gu0kgUw6O(&(YsRga`#zh>vuK zR_}5G%RK$;s^8Z&NZ{gr%VCs~S{W<2cH z7q>UKND{7gmb&P7qNjd6sRk#Iel-%Ex#bNn`dox6?m}q81%=u50MfDt~RtyZ4@75;_mCEi*Mkac}4-RZL$D zmqPxCj%*3uGq(+Gon{nBZ8ue3czLgi-C zBuT_iSug=lP(ddDz<|upDamtUt=354pr#>8Hm8E=D5iIjzpf(Zek9W5!>m%4=jNDP z5u{AFlPHxNga@YcVh)%{Dvv-?N!RXjEet?tC=*ogFkqum>Ja9)F%5tH`J1hi{OiwK zCiS3?h@S~&fJsJRoLnpfJun$Rmd{D1%Bav4BhPI|LpKwTnO-mj6Fh5U6gRj6XQ(Fm zxQx_?&x@O5VxB|kT#g`^`m;Typ4r|s7PcXRMD*AP?sU7YOetHZ;%mnh6ye2hJf74N;HNYy^<7nv^dqB zJ`l25nUPT$8PN*SqA)NorhrK!XqlMB3773IX?W{oXqgHmcP22hHx9?lCW_64Os%$H zzxu^T;HWJ$MAmkBUjfI6Hn z--7no6P8wmfJ}t`-7q-l#ubrDmZSFT*-6}Xi}H4m8qzgds1cE=wzfGQE{vLX>mzpw zO;3JYkS{Pq$vx~K)LTZ9Ud>FMKpls=({{E^IC*aTvhuEma-CUVa3sB>K}MrRJuXsohHzH+#hWdMPDUNKm%OGX< zxP+|Q-Q%6_tQqu~F;)n7%$*)?4oC>}I;0}1Xljcf2A!QC2n2<{d<(73xp zaCdk2V8J0cA-KEk&hzg7+hgo=j<~DWS~csMv!;ffRUg)tg-eWnj~^{epJpvmo@CUO z$A"FTqfT18_KIJnIgK=J@7wZCoq;-vOB{qx!yHc4#!jiHms3k}h*XXc{kX3^uI zeUlF5uw=fTuB6DFH1;aXd7~%*2Zbc^EZ!(oqc4dq;j~FW)mrf+_nrFs#1_`aPNCDxsA(F2s#*nSA0h{6=vxD z9IH81w*Rn9CC=8q_Jc#aAB%vXM~_>gq3E_KT))hY&(k63vkM8zd++ZI0m!gnX(W+B zkTR>9X{N{ln$)_IT>z{IW8EQ0aR*WNhS?r%z`j_o|7%z{r^r`$FBy`C3`)jJfgO|| z(&PmaX8DwcQF1kpv`Kd$>ItCuiek2Gs^M%y3Gt_(C$z4i(SWR)1{napMc=p2T9AT) zl-qCnOiHw$4WE?c4Q2P7yt8#ypsXl?dmJ zl6RziL)LnzFT!tE9p`1rtz^_IBP^l0BL3Y4wuTH+RLWzh*&64XxOO7;zm)SE-r{Ge;r9Wt@(XjYN( z!MgYQrt1{^Z#u^mG4bJldYoVJ+QkOq+nu(NLZeH`2B^yEMTVX{3U)_Vx(Gtv)p0-T zoE0^Dajl| zOT5f1}eb z=_#Ib=z z_w8UjEucel-5=7s3`aYmP3fW+MiT_t41f@2)Jy3=2A1?%VeBxE1B*k5!JK^zEC?E$i7&KGSIEn<=B=Z|_3Eu5U2Q?h9f#f)%qBU%*^~`^o|`>l;O|DbtT=aFGW?tM+pmM!SWQXKg}kf z%?^ly5rit+tk@P%cZ-99M!?GZ2VaXpU53uF#TCutan7xYhmIvK=@vTuA3G>{`Odf+ zzoYZmKr~+W)RH4(>~s_)oW1U@qYZ@}MsRqv!ll=a`&^97HdT?9gt^Q%dur; zGMyM0i{kdv-YGBN}zaUe=kKcfAj%`^G z7%eyQKf~|WTk5EY88dXlXpGX>;OSMrygQ7qS&GAlK3&WlD*yWsQxaO;d$s(pY5-5R@$jF>RDl^&iH8^2$8>)m{`V2+#BT{K--eSaJ-R;O 
z)j3_$vl=5*sk4NrOC*?pEp0zh*F@tlLsf+mZ>YAbKL6ijwNkXABC23H_AMLk`dS z+~Y|vXG`z)noIZG=XZI?lEVYw({^?oJGDK!Y|Br!TA5*Wo*jzQ)wP6E_E=eK33pC{ zQs>x|T1%r<8SJSTPs%*H&+p>aDG=X!unG8}i}xrpoBRUf`zmgwvF*fLxP zDhlfuy$DlW1Bc!*W#?oV1~QQ5Q=A2{+7eWf&=^B5H7B0=U8aG+OUYQ|wOOSn((cxh z%)+6u7xiXk3ONDjlK9ZE-^d$8c-qvnARei@T;!G0;yO=%(}bQg{0DC@<7m=Ym+L>$ zBN2)ABV~=!_jB>4-G(iGXNq2Ck&1#A%s3r78xUhG?kUf6yX%J9fYiJ(M+*ZG&Wl=Z z9rOu$Q$v4U$s=u~!?Dx~;BHghb=tQZ43}yfLtPXx^>%{{Egg-o>TFDm&|ijbtA7A| zzRe&%vw6)o)QUm(n048_yrxv#?x^Uz}+6~?t zXh$h>1HZ+*upRE~SC#uHn!t!QOE4gVDvd>aBS#UDzWgGI;3{KkP)bnEv9xVTVyTO8 z+DRdrY!lArkj#B}O%MTI_j|4YS}xw{-8T}SH$CO`!0R{_IDr&%9*6oie8nbSaz<)k z!;tNAUU^ja#zZ#goju2s+xDBM-L01DNjGkqmHt$@fadGa@>xsH-ALEC z8|8A6n+k_0``rs%VyFa<--$P%q|u0ovKYe`8!bQ1cm4a-&J^C`_N^Kh{j zVP(1J-jxQIi%&k)vVdcG0V3?nzYa%T|9ClYpJZjnJbPj(flCOm&&^sF8BoRkes!!i zV`NPuh1u}ZJ-4~53vx*kk6#3|SaDj=M%_Y6780B;Kw)*J?RiwN2T#lmT5C>-ycj{C z_p?6O>EYqhYqkpbr#d6x1Bn3iKc)kHu_cfEUu$eHl`Z^^RB+Z#MvhAEKiFpxbl3E> z1%_t?jG(oexyq}7H{-OL2p4mWKbrCf_#*-YKyxx9;r`KOm!gUihU_av`Qnme5%$JN zoG(=jL-xcmWSB3ZOvTTM2G`lR88th{8aACZT=Yro&1o_ZlRNw#Zu|BE*xN2pS<}3+ zDc6&$^RO5umc(Q0ji|iOBS;y()6wP48rh~W=iy{Ey`je6bS6(^C~{@2?{LY@bU>Yp z31qIpwqR*Q^Q;HwtzpK-Dh;EfYI*l83MO8NbF>~v^!efsPBn#?geRS(jg~r^tWCwD z7OM;^cOOu|wAnbrFY_IZ{qYt;LGIaWk;AO4h?*5PK~suh+7W+Adi&KF-8(U}n7RnSDE;e9%FHHyH7C{t>hQ zYBH1igqdWUse8MJKl`hsiWVzU%vhtVod)E=CrAuPPM}kNynMp2+A>||1Uh6_8Ai$< z=nsAYAC4NdCR3PU$R%7n=rO||?eX0eEr>%*c*XsFW(!2ZaC$a-d$yDg!JX!xd}qkW ze2xM?P=Q8Prc?#dCKnG|g9BS?V%)308cJI*GM`}#>amB9?7VY7AGY+%_!A%Xh!x-0u3gL;*JE%6{8?Sl@aH+5@bUX`VZ%gUl&<5{$di0IFvh^@ym2${KAN&PLU>((%^F&s26B;5VjOm_m2&NIBGEk z$+6!KmKR?0!~PD71smhm66Ha9C4Se`GCr9DCoafu<$;U*b9 zVxB66;+*1Nyal%S2*?T8_&49+iH*&RiBbsPHZ5cJQ4b2*^brpn(N$V$f9Ffxe-X zJf46^E1~8%Jz@lyCaEPq^Ni66NPuH0WPalWg&nLYE2-MNvipQm7rx#DWJF|aUk3e$ zzhVx2B*82+Pw8to`Bif<%&??ZPi~ISs?-#Rq#)4zBAaAq>4?~oc$i4Yp^fkVmMYZZ zd^}$t3yF2!_h;#XX>)%E%@{+)gNdhx&69LbH`5_YHfApT3)a>*=egm)Q`HHSnB%&Oc$n-clDKRC!PGyTcjyLQ%2zyQBFd z2R@z(#R3==1hIVlIWVRaOr0HwBdDdOzwvS0Y5fc-dL!YkXYXRW;Yy^9<3nd(qziXl 
z9c<(Bn%G7pf>&J8U2t)F5HKSNN|_7tA|u%VUg|4KBVZr+q`j`PF#~{SA$wJLx2^I| z5f0c3^}Q=fQ4gsFk2M?iDy@SoS82i>wwBC*)>(}To&iG_ZOpq+Qe4VDUQBJghZKap zDK!+wVi5%aWkOVQeN78IGv5XnE**#vEkGh>t1 zMfHV1rK-gCalSnbyY9wEyF{M>_|B1D9xxZ%uw2$~I{jq#Z)Hx89l}LEdx7swDoVIU z%f9auiIerQpNMbZL)Cw^1*WUPr%q4_?Ms!RY~|W1BZ9M0)z>AR`R}p@@`IfwmkOQx zO*JhvhyVA$S!6T^RPZN_Z7W(k>M~gO?%_N*Gjp1cM70pq8YpAAsPrB8 zLdExH9mt-SmcG+g3dV)OM3h(1@;1V%50z2lZ>o3I{y^otEY#1<_x_S68gE=*LZ8Pj zKIaX4d3PDF2HPZRv)xUAQM(ZnK-h0gzB2MPl$$S|62u@Z9Ts0NoA(ex!`fn};)3rV ztJ)J*uU6#fgFra;kbyz(?}|3OGx7nnsy5Rc6g*%Gyp(KMU1f?`fpqN|ux68Dp>M;OWN8n7y9KDizb^1yZMk8dEh zq9+X)r>#vn9eAnPIg@ouS|fjc#=LCbTgCt-pD8kyJkf`RsgG#8QV29ke+^o~+Et8u zFp9zw`vvFt=|=!#p4^U)*yEGDvbvJ;%1w^ICv4Ul;cS+BBo<$-K!1!QNB0w%%v3gj z@chq^mLO0B_Rm+0o>H_!BovS@o`Y4JOb5A2Ux@in}- zI+3$i8;d*2H1@h!aF-dn95E6&1J2y|b)vQ`tJH=Ddpw(TrjR?ayuL1MQ)RX!G(u`n z5)c+jRww{GFE_VHB*3@CSN#jBzCb3nU$`9dwUnSAQAQ8PdXDfMP-GHV*zQ#Za&)mR zaN-Z?;Fm#$JzJWFje-jOks{|-pC{~PRA(_@mBh)nc!dSTc2FrH3b4w8u~DU3v9 zbHTV3u+_+mEHJ}LNg~iYs-LxWT%Qdt`191nBC-@c25@Q{jkl#hnayB=%)%Q>(})*5qsH?_=e+ra zM~kK4q^o5x2F))M^U)R{V@6D7%!QbMK{Zg6JV-4v@g-mO;2uBQT_>2ZC6P~vRHM}2 zB7y*pfF<~lTNaJ08WMrzBAbz;`EIV>4X+++0nb>xnOmco?o(kqt%>RPI=|(c81hC& zp~vk(GyzQSX#ag!kIUmQ73hyi*h{tVdkpCLE0J};K6zSoE7sscmJfR>65>Y z>C#Lcj@?PPcO8C_fL9S9Hrn1GDrTLryu!CMD6@mpx87gYDd0K_k$8Okm`2Ge zhhJ;6dzzrT>QbWB60i>yUdMKnAADXHzG!?kyC6IaKsfUxH+lLxLe4^@Z7lkny+{iq z6aiDCaEYTm-6+6(`l^=@#&bWb33Z?odQUA;3~@g{mRI>3Gy-F3qi8|V{t@td;UErV zM@SlZEZ0Y5bW6Quid~mr|INB6+gezo%Fr(6y-!PB-ZK&GOMGpa{6!ttM4+m=+O#Pa z_Nsk#^(wX~n6#BydpbaLaKh;KN{lh0yV#LleYn zvihTauCjuFb&agt@3l;$L|%9wwAfbTa#Ri6GgeN!ld(!;5GH<6iXR~be6oI9OYrib z_Bjs=2`Gl^NE+jQQ)Hbyc5UACdS;g9dIB<0?eBH*j0jRzA*OxI6(?mGfek zK);_fe@eszNW>LK1&DW$z4OU236HSX+UwM4OrS%$YuFq<5DKq>!cfW*rcM8I9Ei(J zw9#Vj`-Ejwe%Sg>^YOj8LT=qEXe?jT<{(C}wIm=05kWq#djr)`D`CDz0T%xHfz}^c zCe8BB$p!fK3=d_xL&lRgMU&#eK!niOyXBQb5iLYD`DtoDB9{2vm;lieV?)Cmg!@)S zTV25#{8eUKvChGp&4)NT z#=&}nd-bP8UTQKRzUyMg70b8Lstw2YaPGr-u(9P92U<4Cgl;RhYv*;~D_oZh+FyCv 
zlZ#nrPk5Q{V-`P?Jc`QAXM{AC73uu>>tWK+FH^J%?k|gF4Py%j5ti-GS11cQNL!nT zMXh`~FrIbk*{YH+;T~`2rzSK?Nqrvw-s72nbMCEVp;v~>$?|v9#*7{VuYpW1C5dEJ z5h{L#=YsQ1AF3<+buGjr5C8##`Moa} zYku9o1|v63Aq(V4ckcL?{Vwx^2*_U2Pg7gY+^ol~Mtq?a8 zkcV`==XQLxc6%2UB-o_(pI1d988XN>gL(BHI=m&;o=Qhsn8WNb;#mrP7UX!`r)20Cg9I}s$?iWZax^Ar&AFt*)X$i|9v?P~>OP`}2_}BSAKjvJx{9#6#!R_FX1(5}b!PPqCK>`O)iuWA z4~^9vdebNmP1bBbOPZN%xmQ!S%+)cI+CKg+%_uoQ`Z6nRjm>0gyLVS%XpkEikeRio zC6mJf94P!|0)^0%^+wW*qs^S=++=L8YAuy17!F#C=*A|P0#yu>RK{$fe!4g_qYM6n zciq48#*6!_9i{KhV&J_;(mW?8|KTz zQ8u>}sfyxyJYpfI(buqp-{xWL^v<1xs9BsOZn^J2bKZ8|bmwRN9w^o_yr=r)X!wI= zVZL@VW37#F#cQ3<);1#j(QG-tF5pL<2P}!<0@{f;+VsudnwZgkr~e@|y@ zkIy~RtALEBaMVLHEFmVQNbAuYvh!;f)L?m-uif9}S)Y}fc#x58$o(UkB`VCeO62vW zmnSc6)X3X9CY+JkDk(RsqpkMyJY%oLZl}uNkhFxU0%3Ty-wSwIDJkTM zb-OS08BQcnrYmF{nbMCE=>08|-q1@M>^YG!Q~N`Q*n|9yu$=uXmQo#cEJ!PK-?(~m zgeGwCfYz0{7O7oMNI6aepW^qS=O-)Z3ZCwQwYv|i5&H@ZK1Hit z;pBE+<;SibK!l$;!*oq3T=PH@B{Bp z0>yCf+%`<*it64&W3tI5uV-P5MS_q*pE35O*5~MP&opTyLSYZhm0o2XhQ^=6&7>QB z(Tx18zDFK-_Z2;FOrzX;LcT+1=k0N{)Km@GYSmSGRHbXt;Q;(JoirMb^ARI~dn0US z^yBcHbbPVlFVe~)*{a60l#XUyaZ?+OeC&1)cM%hS{|3(Z1Q13DH1hNE*7e?3#U1^` z?sI=~Ha!iC2#e=v9kR9%Y&hcT2YLn;eev+loeo;129m;&n%qSPAOwmPF)IjAlg8hki%`P8i;O|`W$INAVD;SMB+^G;^?T9A zP9nHA@m)3cDh0AaH##@j_M~(5gpgS+)+ia1-#?f1YS!dLu&-iiRtv}8T0Eob5$V(A zs*mlA)@v-UfBW0Dq%txiMKO{?F#UNva@4R^35g98$Y`hl<8bqcG$ystDD{?$9{@;^ z%W60xRMjnCqLb8sNU6{Ujr<-G(IgV)S0(b$?)@0hUA-s{nF@7eWVfI&5K1}Rr_W`c z>R-2wNn2AG0~fdkh(j)b%VxyGzKTOwXB)38HYX(C3lKD)Hi9(w2rCp-KL3fG8AJe& z-&{P3)fWU6-rh}#lUc<7_1f=4Sq-?F*-4!_7YM#~rp+;wOTmo@9`Ey1MIJ}<^Aj+r zBXoavJD8UDsfM;b8rzs$z(n@z13l2$!tp^iZzeZ;B16$Fu$CMCmB;O%d ztcm>?hM2UPhUAgAONdq`UmCRH{Az$#w{}fQ|B09B@-8&Q<+p%~NGtqhfm#(uSc4)q zEZs|pmgBh;p6~Kf-3Nx*+3KA4ebr#);=!NE^3{L%N4U+ZucO|WL-a8wXmdw@=**Ot z5Jwbzhw#*+^5*Y}&ySAS8&<-IB0Z^UwR+WJC6|hVgHeqR^raI3UH9eV_CRUBJc%C# z0v`cnats9!|CdGsd+?a^=qDm;RRjNHH6os3L~wb9tz2-=^)E)vvctW@v}r^Tp1Ytb z%0erv2s8j7JPj*gx$!UDpt|=0GlRG+jdWrC)1R*CP}KQXo-O59{pc*cM^rQp_UK?{ 
zLyFU|flqEWmCOT&yzPF#f5u=$rtBJ0Y?Ia|`$a^JerqE(rX&|KlSWT%%dQ6`MW4rQ zw^!n^J-ecm0J~XCX-Yzaz7HU3P(#gJY}ZK76OpLUEwWbOO~itX*X+xz}Rit`}G#MyVUimOTrsqoIrf58SeefunD zlLU*^I}h2vTRz!y-@4idwYoaz>b)QwlEi+=me=(p^Ki&^Ou%pM$fvA_{156zH@Yp+ zYj^M%wIJkcWvCEyrn1Nh8uLpI@)uEV)eW2Vq5TmPaLxXsuIEGVCRoQ7!F=gQyM(wZ zK|;yB-`~JxTTsbtuWF$5r*1I^>ZerhB^~{x{-C8AL+UV8G^u|Mjf?KX7~B13qtkB& z4fj2H+oXSK#rNFCjR4-A3j{g}wtWlEI<+@_8a8b;DC2%v!P0sWxwi6{usbuE#fN*Y zs9{@&f@ms}Cw7AmFUt33Cf}KcBw4+d{`~xQ6Tckx?G`~M#clK@w7hv35o;1K{qr78 z(_pL$xSDROU~Gl&zbFgC9CmRgs*t*U`;{I32^u0If3__E_Zn2;tu~vCD3i#=kSZ7^ zL%yggrEWs8Op|^z0Mk&?IeCPkDvcB5fse&{RjbQb37l9f(7liXcXS4@o58TKWW6v( ziX#`L=O!<}!#RI%h+IR{`uA&ez`RJN1~v5 zn%zybDxsDOT1M0~o=%l(O>b)oS-<_xZU4KOq%y&{fLm9b zLx@ym+P}}V^?gsAQ>ptIX-Iu2kX;UYYgdQ!xt8ftaH9n_#>;)U>H}n^+wohXT?UL+ z`+!n}h8$8KHArq^dp0lB`xI)T`X|->(?f97N5-=JPJ7-I{kXQ?E&43pZmk4b+Y9)! z1)cAC-vCdK*HPc>i;agQnUu!p@~ghe!|a! z>2RnH=ONVuo_$l6>N<}h8{R{#uPx9rav@L}4gZDk8eP>ZM(b0AetVdNeC9e8K}L5o zkfi!#`wmTKQty12z~CwOj*v0kJMX^<-SJ)~Ry4~uu=*Evz%S2yTp+;SR}?5NW=Dj%Y~F9-A#t0rp8MFr8{lznGQv2hIFd+8 zWPu|Y6kWA7t4x2r6ovT<(wc|<(!65`S-fvkKMiC4s|E5Z(E zg}GoUpZm(ZZy1%Q?ds}Hu+u>tZ@YTaS@5T8b7}~B_{=T6*jDa=a7JZ)eLoSpwQcW3LUL!_1wC))awtes!+>7 zJRaPYMy}993(#WU^8D^`xI|4zP3khJovxQ)+UCRN2F1CdW{}k$5xO+hA>`UW#+ds` z%-7S8VrXc;L4n&2yQyo6DNP0(N1_SOmCeMA-Pc>sq8eb~klv>TzW54VY^VNj{JpDx zY0N&hkILLbsBb1o1o)j%?|3Q(3sI-~zXyW9yIW4}>D0lTY z_D9IqoTWYH@yAaoM)sy;XSABeZc^2Q4?~n*>R+_ZufpAVZ>W!?p`j#LIWTtfK*LGm zTz7jCVIxi5_}uW^zXY(#d%Mf6rdnIhd+t(F4={zz`Gx6y(0P z$BOiBxppb+366KB-MIhRpRMAf2KH*sQkDHd?KnV^xU2qciSEsGG9wjAq*}tBpZ#Ig z2&On?&h7Uk0JE>~@+r|rK&e!oIP!uaOe~_9sz+myIus!l${apz1Y`8X)9@|BphN_E z6|ZdgrP>+(`R9aI6(@7RN-#pp9iA&)`DPr?kbwxAR>6WjnBA1l^YFkM7K@|E4jiTX zyRye3p59m@6`MBH5*M3HD@v?0KMMg8oej^8x%?>P-(hIgH@DwOMO6TQr7`-#s$D~e zYE-P=-hzeK_G=H%?9mwTa;*2cJs>OY$;_>B)Al;PZB%9;=#1U77-}XZDE`(kP`Bba zQ>;Om)F&v>%nE?dWEe&alc9p|Z`Y~W^Kw5l{=W{#~v6RpV`ka)C3#MMZ%0 z!i~(RST&HgYLU1)Ti;{RWY7&;n^v6!@d@qpx*5aOi_}f@b35y~X_BwW^FZbNiuKWw&-%(U^eFc2iUu0* 
z5>R&8_g3O=Nxab_*Io;u+bfB1I-4OO?*1FI-2ITp8j*ApZ%uB9<}2S@#Z(Jp#R7}z zBMIJOLD)B-x;s1KY^!*fDNoVAC~Aag4X6}|7-lXZhZ2z-aac7Aq<|P_^<82@seBj$ z8W>IJQ|)-;o})H&Mgj%=+Q{otKm5l2HTGIJ+eDTKsG}$d?HzJqE2v{74=lbbyhb?(4TOTP&#Wei>sEr7kTGZTH!k*)TiggQ%=rh5-b(}7w~28 z7E>4|UlRW2=;)qe^)9f2boQO6E)E6GS77r2lI*tV_gcNj-*6P;E*swzN5pf&*%d=? zQ%>SUjf+qoz~n*Z>Kea=6<3jE&k2+Lb_GDmqvT$+DTWlvw1U!%KH*8fol#JjX-$Ht z&7-mA1Q{TpQPeQOkW^X2LI@EkJ~5`Zo;hsU{3LwRjx7< zeAiFsoz27_9xcya0y@naI!c5#GI==BBJ)RH&6~{%^Q?XR+^CJwr*ljp;t&n8YVMWF z!Rl6?*OfX2+$4usDotq2OuoI3l34*_>3Xvnw)31&lbex)ry?wwuuIeSSoi4SeHxYp z4gkQgJ5~=v4294tjC8Yn;@Ar*K_3ytq1u-%1%mj4#cFdxY11twku(j92S8%C%mF%* zx*Xe>xb&-HrzLRsY4}TSb|ZB<;FJSly*$X}nF1!3JX%IX(tl*G<4J{R3Ap$neXLN_#mBjE9qgfa6NActc>~ zu+_ZIz3E#6w47p?%B7#AH_*q857iz%-oDiQ38|TL%fvCcGP;PLX=!uEC4GBIsZ4-S zn!ljl3NPgT7zd!p65iP>lHw`HP05|xzdhxqQWS9(9T6q17Rp#gG|V56Xo za{hW>|2h^Unl8*(^1CH9F^CB7ph${wb5Yc=8~1zX$`|lgqb=8r-SFge=P;K)?a`NV zXrltD+K;xB6ju%6#R+Wj^_>A$>`0PhwSJR%-S*mF_1iS9TAg}Wx;G~`7NJO7Q|b`3 zEj(jm?s&0)5RF6=_x9WGZ|k-r-PanMyOfZS=|}n`>|=@j!A#PdR^Gsr|LJ%ET^H(i zp)S660sk+6NLli~X~4)TEu-)Li{Z;ySp-hq2zhy42A|B_tqQr3r{BpF*wq-nYONDT zjqUz5;feQR5#_j}16POHPPuUuX9Nc3?OIy=v)X?W9diVe>q(ZZ#pYZ8{`pG@B1nix zuf9adHoqwjHEVIF5tqYW~d2<8_iX;lr4h2yeC@`G~GZgi_)o_xZ@wS9b8i}N%==!Z`y)RjpT)zC?S~%j4ukSk}HtUD-z$nmF>a|58rRD z*ZN(IrT%0ut_yV>Y_(KXC)%hkLR`2NJ$#hP2$Im)y%HDwl8TzqNI^)rR#Fw9eS@B8 zn};Iz4Hia=-@Tn%TCAHX8nm8Q#}*`*FfIDm3O!!%bUvkoU>PR_EU$UY*b} z&_T$vbsOU>C6qw;Jc2a(o+-4#u-o8~DFJ5=TT~P$2RwvD;J##$Fbl`n_t`@fBn$!Y6@;bC}}NU>j4K z<>FB#D+LC0ZGPnT>f`sQEp!YIrdM=dUwxd_43zZ#n>w_DnvNA;>6-d2zFAAC3HWTc zb{Sh`sL4jxZ$7DawK#eUXRmcsg~=d*K2igVzdRT!bD*W8!j1xSIfXR$NaXM9SQ=rv zKERm|yVv#j{iuefJlFYkGA}!gVa^wU)dM5}-_RZ9xoMEEu>AFXBN5AeZ`H*imZ@)z zeHQYuqafpog%0Q9GQZ8cK$Z$BCf0(^%(ijmJA3N#s$K~}jy15^xphMq&JMVgQMk?@@aJ6!C>gB4VlWjA zdb$>-8BZQrY8E*mHzWi}M89!H2&-O7d74P0{Us73n5aSM--{tJ(CPs}YeySgg6_rx zzh3$t8a6b>IZDl-aQHs$&`Lx&3~n@78VoLQintEM7nK5_vOS0*qv49mF`F9}#R6~$ z7s?-{A8DS7p?@^(q=y7=5;3kz6%-_`04^Vv>z<^>lQuI5;9kK$HhEaD%j63rM3IFq 
zC1Ls^n{wy~a&DArFt8j*h6B*nu>mA6?HE%H(lIFc zL01zCb^55$*2kg&V?M+0jWDs76Xe@BpOh@p0~;rxe|f^`C25O-E0|*z(E= z78H&ngcZ9UlDd96#i;M;n~VzbgR(7DHNH&EYKTBP$6MkEIF^rXHnpel90awY=`W}h z!!RZ=gi>VLYL!OFPY6$6Qj_tcVPU7z`2V(%k)f2ND^K?vKVLTc|9A{^SA`~sN+3t5 zELY8eVNzlE+A`PzH5@jjXb4>!^6ZA1md@xr3KWF{*LaFJSXr8}P$|Ti_R?iW9)98O zCJ2NO6kI*cTHvB?8O;oT<3cn8eme5Cm2t=d5uFyu2g6&M7^VB`uS*hp zlD@Ccy2x(HIXot9WocRQBYl5!On{qg7q)q2sdt~VW`4v~O-@_lVr=C3>kz25<&I|= z|0>m4udlQK@WQ)T##BBDmQk2=32$hX@mcwJ=NPA++SacMt6nvcRNiS z5uBT6-9lXl;xwCqKX}2#cwZLeiZtp~-5V>F5W7j6lAbqI^~V67pWM$F#rK}V?yvsp zKT{1H$&cSA_=1IMHT2VF0#OOfc9 zyC%%6HygCa6-LQ8%hMOBwZ#E2Op?^Ug_Ut!)c&=#vFJF-4pF$bTE02r zg=qml;0QHM**0@@aq+bUkE@>=8LYPxnWM(lWlf~RO#d2j#fl;SD7(V)US~_X8?fFR zt9n^81>U7uy=i%<`R3FcP+p&96EM==4$LQ16@PFCGJhZEDLfXdY6*Y_pV~Q~k>7Dh zBXB@ol1F=ZM4^}0MmG72S!8V7=RHUwGC8THijID2Rq8?LKH3l3G?vvqkv38!mFrhw zBWdcMPpSmA0lW6ySanxfSQHCXd=!iQY*#AbHvBv_ZyhaCzV3zW(`Dfeev%o3v&w$% z61F=rXlc6B)cTun#ZY(iy!f{+sNP5!Rk5^&M=E&#F@=S!|2sy#vbXM54HJ|t<94t4 znxFsUTC?zCr}$*@_mQN+R%5ar9@`~{s1v_D2d=y)?fP7i7Ry>dF=$fq1+uEn4K1xC@n)I@N%R5CmdyC$x3!xSgBF_uk zT1i88c+Xy;)#H$-)n^BvX0l1r^Zp-&IVKh%=}RiqOx$J>Xu=e*ES8YW^Nckz-;Z4Z z8@*gh-eZc`_3)A}4hp`0S4+MEt*%sSc6`1TK^1ZYb$tXLUpLpkXh@0ppjm1X;2;9t zQIqH21X$*4~-PiT2AY|mmN$g0)}nde?bjD&YH7;&Z$A79B2S(SDQ zpW$;*wTM9C5QeDL#c98HC>Mq{^STtA2BGnrnz-yalA$i#L>rb2tCLnu8Nz$ytv%>n4lH&e8bXL{ZwOpVk-$6`EpZNYAs6 zDrX6+`5IA^{%E4sl`JJdO>BLH0HojixHw7v%2YLlo}W1FGKd-W5Q~BR{B6n8iOIP} zq8eQ|bxuOtrh5%?Ya8~JWj0JN`3x0K4G^gS^FcLsZ04lPEWYrmV6 z;lIM&@__u#&ilJ)HRN!owW~@e+i(+D=&bBvQ7-GOsQteqcDS)X-!wETm)lpb(o3nd zZT1Fh)y9LR-!v-hZ3itnD?GY2u{rG%kml0|BnH2bhTC)&oZ$5FQPWlA;Dxw4X{n_d zO9Y~5T$7p&5?nr2ns{yhxy9Ie@Bf}f%ZTsyRHqB8z>cgGSi_u+RjvoD{19mwxg*1` z`Y+$iCd$8>XG_7X%DX-radi4r4Gw4TQCpKOwmj-u(({$n`<8f0R4#EPl(7=GqBfzj zG5~3o&K#{|jV3pE@uBUo1c4z8_19$G>CY4ctDc9aGY_FB4uL8S6dg2bvEmQ4v8s<2 zq85f@U&!-DT_ty}wgt~y9lC9w5nncD&ieBOq8qf9l|5^|ojM4tJ@aS3?0wTKV1*jz zCo)T6dC?@H+^SW(p;bJF*PY0z^PXp#>9Eifp-El|^}9$eBUfKqffkw^j@KZD@;j-G 
zZPXXogQsH)^tNLDB?O9{mx?wd*_TIXq&UZPT~R=Gl1tmR)-0JY6Ywc0HZkQwNo7Qu z;=tLHf8;*awrKlQ=kp)xhe;k$jpRQ#PG=Js6UlAh&Spv@p{UDkO07?{1NP_yYNpIl z(|I}aCKwkY@kSTZo8=#teAgeIR-tZ^RZ77PD4JlO2^F4_R(TvQ&fO{=Em7B>YA9u% z3PkDxYWuS%Y@kl`L{7a~wYUIRi^U&fJnH&s6>F>Y7?P{=y%p>z$}P5$HKPd}je2w( z1L6g|OZo5I9S$mleRU5a%mYDXQY1Tqq79gDEFwTNcX<3g2y0zBott|7=K7NmGA|xV zGFrwzN9{1Yca7wvU_mmS6|#pRNt{ZanQ1$8O}U)CZSR7jw*v<|2)qW7aW7A}MKg0-VQ^Rv27vWFV z)q!R={3qT6b~zo`jHIwG+HRU)3Ae}y++q6`JG!iK#TzoQU<^7fOU4Cb9SbWc(3bgg*r7F0| zSmVphakVCHV{!6c9!jNT)NLU@U>`+ETK)3|4Xsot0YFh3Dn%A zFOpNAMYb-rC@+fs;Mb+s>d1^k2R5Im5@&4C#c?rFQ|D4Q-tD$GWa`Dfj#%ALlV;39 z6M6#!3?S6clrm18eDw;uJ&rPcZ1a>s8jmO@DhtztvR(CRk2eSuM4g4z^Y-M1EomAW zofc+~(dvy6)ZP6bm~0^f4Op|Y;*TWK7-$N(fH^B?zwOL3@xT(0g?Gh0Y zrh77Qp2^GT_Z}bV>78jejM#X6iLZT%fjhv55^8#mN>(B8Jeg!wtCBr5EXGsz|7vjw z1Vytq6i&iUg%Fh+{A485j4{GbJ?c=axN%~gL}k>T+bD-@hy(ii=vj7-Vg6Q^Lg0Q%-{*AYY#;$f27=`rO8=rRo0qL8TzTUwd*ToQ@F{@M z$Xx-FCCuiGh5lC={Zv&T+qZGq`uJErK9+wwe{d2E(t z+y6cw>H*ghWt8_Ht#g?UW>C@d2OQQP-E0&V8l&9uPa=nhu03Ckax}dt6hvj8S&DL+ zV+o-%D+nfxecM9UG88LBDv;etNmprTRKm&0FRy1xzaNf6gGO!)|LL0>nXF?4`K^xf z*u1v^p%4QIVQvV|i(%>Fjk35S5_mu)ULCB^p%Sj`yd06XdreX2{1XXtRQ zH(K)96LYlla zEPBs9Tk#0I(wml_3%bfM!y`ngM^WRx(1|xCVy+mAPRY!Sjk@a{k9CB&Rv)0eU?e3c z*^A23j~z;wz5{7>z*%O<&;y*NQFXimvmX9m-eM&b(ymU_p8h6MB6N;A7#4zu4qOr3 zM>$yqxMyAY6!cDMfy|H*41Eg-3B7XrTTb|W`WHZ6a|pGIZkPEZq@Cd51j$z5jn7`t z^H;JY1k%76dFulqzqMP{UQcG~Hv2guE-;eBWci}xILg7wz_GOQdo2&kMjwCMx{1W_ z6MSc{+Grz6tmWy2zK3ItAv1dDeviHj*{{3QWHTo+D7smH z?qGhttI5$qMK8(JLo^XPx>u|5HTGGZLp}WQFwn=Xr9{KP%E;hRuH#hK3i{vn_--%M zQeW7W&o&JI`3iDM2dat5J9R2(O4tUPE3}}i2F52ux};}$CaZBCb^YG zG#xF_%fqI2966)Q8`W?Pnl*uW^}`YEjgpUaMsmWd7A;?pp-}WkEe)@x2?gL54V(4! 
zsFwz0d<(>;36M<+&}m34Cr@UCRm_}8+c*u`Jvn#sw>8*_G+Jl;!raXx;bFxt^%nTS zJrYq0A@>VmRQ321>+d^-q7`DooH8zQI17IxS-&GPrd~q)JGacnRBaSIVJ%^!HWI&r zjmKWJ+X&caMO2Jp0Rx4Bt5}uWr2I8qIh_>-vZ$B;7C>z@X9^Upt7Y=8|G(o?~MMYjBwK zPW=`E!qQPFL(={_r}*4vNs-ocKr9cK;|h-g9k>y;TY18=)D z7T$suYov%Ij~nay4<^0dmegh4PcG$J@-&N)5m&PsP0X{1?aA>o2%@rI?CrOkj6IKtU@* z+H*vE2P3Jm} zIExy3+{I8(&l0F>xc`8J1;ae#<5qSe^Y>ZO(|-X82PHy|*;EZ5{ErTlWv2;j3#iB{ zXTrDIE2vLdWn%)nnnhqRK~y&6yn!ex2><)>=5OM{{drb711_ziTF zwSLAi=~EZVh01?%Rs}D@TT^~c0>rbWJD6RTTNm zP4eXFZ+bBr+jnCRJLtb6<#%WDxAtDY2o$S_00|8?PQqw1D~^~Tk;$Xl;#SEo+e0&A zuEfTSja6ccdvX2yKVRsm5r?9;m+`ZAjh<$B66imH%x}7M2YsZ(+hz z&B;vYxC}_96BU}PdgIu4EAvqT6uGkLfZ-&$p==hniY9L5nmpRCmAbIxzHk-bQoW1b zSbR37BY>AvpOV&ECr2v?_nJQE+Z>+$xyL=>{40O`MM&XmCI3f=fZOxIc_I89GPE-E zdK7S5>l22yk%39}%Z-vAnsdiE@>mM2$&j_2Y@T?oj_T<=H4rU8=)!8N8N6x)o%y0F zXMn%e#hDq{QHpjdQ#xzPp2W%ChmPso-73r(3dY5w?BVDk366;3NuL!}VImngVKixJ z3B;m7^GOlIzs^t-wtz0*&>H+1V5p<+EZa3ne>lc$+R= zUw-K9D2tSS=s(4S0a#womOlLR$Tq?;v)z+Dj;c%^k-3neL0n4|Dj#?YLUGG${r@8) z|Fq&Aq5;22d+;wJADNy5Js5Hi01Otg#Ac1!6$9|nN%*@k-n#xpP>Q)wj=Uh>mBj#= z)fJ0GZP|Xw#w@rH8_l#jY593tL5AmoZzs&YuX31?d09Eqa(QA*_#!5fU}?FY-`*~! 
z{nS83F{H%s){?(M2kX$eXmBQj%uqhgTyG299ny=&qEs;=!DpYoJF*cGO!Gg|$JL)A zDR?E#PXDGPe#^uEBY^c@Z6QsY&M+GARbt$;Yg^C9P`6sa?@eRqT)nCS2YqksFrH>W zUxB-N)mgzJwBi)UxRy0IP-7X*Cwr{B$*X?A*jfZuOqS5nKwDSn$UaTsrz;U5etp(Z z!g~n-m2wG0eKyXdDV()B9N$Vd`H4cF0ry*L{R9#yKQ0#U8#5y}VQ7UZ{!Dy%a3H@k zX<^77M-vvHNT?_&1|Tf;aH_m+x^ISTu}*eRg`MmYUaxD(%>)Q(B(Ny6^JyN5(A~g` zBfCf*j^$PWxo2I9-CThRus%!R6NTZZTM{8?hnLv$9*4=|R9<2lmax4yKZ^ePMoo*2 zp#bNwIg{FWj&sV_f#)yOY{gf0^1qdVj=h-4g_pd3EmY$>4t18Md>7Sa_tIm(xltS- zk(Q%L2}Tx4bVzD!2`v|Q%3-!9*-B2gY&e=JhjLlG7(msBQq1iVPq{e7T1R8D@vKY zxo1FDM`c1>vvCxWru7T?2M&OZsX(%~M&&5!WNGy$^r4s6C*Hrz;V^;rjwg{oS$wL& z*y?CRXtwoV(teNrRi~@}GI(AYlWOQ7?dD8gtLu6lq|M`5qt^LQnhyxM#YSkn4}-i> zN+Nq4%jE{G%E#Qty@wILYMk)kq2#)a=h{bY#HCC{%A(8IoMRBuVfWk~=nv=J=gR)Y zZuD_@csOYTBNbmOE&Wxm69spQ$z}U9-MwtU$cYhS;#qx$>7opFDz;~0Y(n4$!guLx z2U_ufD`=0gjiq&|0oV&&V>9$|=b~!kV4CtX#NP)#P zVaO~;_Q{QH2!jJ5!>%QihTn>#OLt_u^1auaq4s1Qjc8HfKz(61Z=8bX)|CSG!Mhn0Ngg7Ko;N56SiMYgJ35zcaH%Hw(8P2qI} zcOQ>*oUmRAILwumM>?*MQ$^oqtJiil`gDdIN1kY1S?Ye-9f0eg2Dy^2G%Dz_1nC-p zRd5*Uq7n6CB%`LzbzVOnHWBB)IX&u@5qgWpPIz~?D_C58mv|WL`gGBAkbA~jC1Z|z z3NRRu<{>qx`m&n-Inpr0|C>d~{Nf{Fm)QP;*}EJ~Kx+h)ZT9tVI2O5;{SX8)I=Kp@ zNLd15;UjtZ_@{YI;nl_uy2ut68xKM3Bu&RdlWzX?B7zfEx@^U#_1xOP@1j+n>-AbJ zG@P(oX@Z$a+u#1+1azw7&Z&5Zhl%hzcU_Ky}AO_Kot%#zxg@Mi9S2iCs1Anpnp z!+9-g(?O=7pzUoP`%dC}t8UO(hxdcy=@M&?Ra>?9gZ=4pozw<eT}vJoa)?{hC5ro{4Ab)n8~~v>3MTHHKRm(Q3}hKzL_`@tt{@XPsZ)TemDH?pNmN zTW(na1sZ!7obfn?&LsC%jTDt(@GAuL3r7!Vyb4@6oT*>y(#-5LPqQgQ3QPHSMX%SR zW(G$>_^1ELnEhy9yWX%;*VVOve1haX=2OuuyCft{4QQaE3ME`*ljK2bCemWN zy!kyxZ!|6h?#bz2;-eTRMIdoo10-(4cx7&R&&o&T(yM97Ap}$X%Q}Mq66t5g*0I6T z>Vr{)!2m{`9J|%Vqp4h2UiZ_V6PbMZaw$xy99Hp^4BJVN@%lu1)t6%gN)i(KYQjd+ zQ6Q=Yxg!ZQaU_!({8)`~0QO=VZATh(XK!BJRV&`OXI5PD{uqPftq7hQUK0BW) zO2}I6%#UkB`z8yO_Y< zQ6bh|owuJ9uz2NE!d0zXqA5CVnT#X#!*y^SmM@qA&HwDWuLuM(8~FOwB>YNYG_&`< z)#AFLHni<7SQaB=k`F*%=#udFYuD955`-O%452FFA+TZSbcv z@ZT>qj@T}Nasq;Xc7~?fu-SMs3rVQqu9%)kv9{@daCt}sNlNtf=stf|3CCxq=0Pit zFJbW}j^v!UjJK(?9wXBuNsIm5fC+_-I4HyViKcO_IILd6G4HhcZ*O_yyArAk1EE^f 
zUrOy~-WbJT8}r0`=kzLT2ES(;TG-$M(sJq5gIKOZKH50`d^qPygOjJqyKNp__Z2HEDq#*WAb(KUBcw6hnFsKDgSJAsq5(O zeNfYPp7`RawA>8eJ&?#CC#+4uQZ-9*T>XOh6w9QUJ>(phTU_WM-gE?JX6Ct4HM*bG z;%>Fhqnj8Bz7-{X#_kyzBpoG?fs*;c(S~V$`a6(nF=p#r2quJ*-sefk3#N<8#wb&j z=@ic^T_W^2T;{iyBiqW6WeH0c43{3#$Jl8=Y|{qh8<;tVttKEq+2H<~Fc9Xg@x|A0 zK;-GnAB5!d@rtA5_xOpslIF?<1UnB0$5tK>a~8eAjGfrcm<2_RK!CwmS?*Mq+?R^4Nh#&w?k6N# zQ5KPRF$y|=%t&lZ6(uL-t)YqD92Ah@Y9 zFb&?7LIrZYx;Si+;^2?QfgMsE=+1zdo+_*j%yT}Yrpt0B!N#-3URIWGxI&)RsF$M?A%0D+hXK@0|Inx1)n2KMz&kpW+*JG7 z(!A@BpE~MVT;10#G~!N{=aJGf&=9qU#(!Ado&jp0MmcC;JdJ((@UZw^!i}4y=-{oY zPQwB=E~OyZg}O{;FNpwb_PB&yFBC$DaAk>La%KRo@uA3?p7Eg`0t;|q7`gyVfKid3=v#=Wc2 z{O|9)!$NW?%_3`+lGEwa8Sz@1pA1&=M?}WWW(e~$NOVkxY?F)QL3XP%?$%GA$+WBF$b?n)72K`NLm%At2^g2^xU( zv#EIo=Gs6D`l)QbO(0z2mx%5^hyVIf-kzIx62NwbqRrWVzCb?yhcXTA}Th+%Dj4+cCoP62)j%o!`nzt3dvB;fHit%vgBzkR{=!&;dpVS@O6~~>unxb!VdwOiICqh z`_t>b!DLX99~<)tiTLGy%D+DK65b~I!Ixhc4Suqz##F#IjJF?LYSt9H-r<-IhT|!J z#vsxP6myl_T2a4tjNROW-rq~#zrF$;s^am2sLcDmt`Fd#{?>N1YQ`Yz7?8bl&{G^{ z|JcLHumX{?1B*dWCk_3S?Pb@mF#jy)KiAmmDQK>4Bn{z=?X{qLO$M#K?b8q;T&mT4 za*i5RYp;3I8pdr5#Gjxt+yIkYtRl?*GZj^9`2e$JY4J11-6r(s-A+%^zLn`vK@SR` z8qZ=RIPLZe^0#>FlAjo4>^v`b@HP9-k*E7#N<8=hG@@d=S5sNJ06M55wOI3w`5qYZr0Wi&f3FG8+&iBs@DOnP43uf&W4CM#XS2Cux5)CgZ z;{P(;9@V(!pO8tPLRTc+wB;-EHb>9P6?5xZ{`l?KlR!q)=3!I*U!I5`I55S{2xi&q zHF6Bvk)~>GJ_0m_(EMk_Xl^ez-9L91BC)p|K#OeOS{?1j>X~naquQZbrPH214vSv$ zHv4mSgoT+-ywtMGcR7GBFl8()XiAcJ^sy+h&oya-on-hQ2Sq-CJ$JpYP~t7%*lE9vlK;MAZ<`a|mGVCgK0$j*9D1c3qp?G}C7-9c>M%Zc-M z4f{Skg95g4D~Ex+kJX*{CT-U6^lUU5pYmV^b^M{8hp3k=A6Bj31f&CuXVh}txA@p% z#ftXTE=S%4{a$q`fKW5`ID4}s8mjibqPhJHpa1zr7U}xN0kXNEmhW@vCWlJz zey?s48F(JOqEq^9A~?odYO}7CGQkxX&ox0~XKQCGEyOWZUK^N|*B2TxK4}Aixhe2R z)8SqB(7!vmaelyxYDvo$$!w;6HhjXN_?i$w_&77x{RbukWAyO#J)au+lHcOfXA4Er z3=$E!2pM(Dv>YH_B)xM_i7(z+-XMgKy*U1P$mw#DbmzrDO5|kTidO~(sr6c1mT z?LQqOJ~a0YHzP#zmfZ9|JxX(`X`xPobt7roGWdb)V9~{uqYh^Q;{n7moI#1qEq4kB?8gLm=g5esK%Npxw3Nx2xK)lM- z9>mNFoetHsnxbU1_DIruH=Q6Ed@)`nB!I>YdY)07u?L^I&uMP_V1ravyzIT;hVL7RMiuM= 
z0$310rH*ZpwT|ZRVpMxYkESnm37#%bk>uP z1H~i4G=5-x9`AkvBBR^JO{N@hT)O!wsCLF}zEV>g>+-_Vg{)1XN6qhQO9=8@IZFJc z^pO|v(Tf)2F4imkFfi&Y4tS)WVZN6682<9m-Hdy`SiN%GP?jOpkRRL0;EZ1Hj0UvZ zLbM9bbgm3bu!`>;w~UhV%3^_rK$EIg#o~L>EO!UL#^=?-pag2(McJ=)XBzkg`LF(d zY;?tTEyqIL4~?3R?`4gXJWnzmsBkurMUs=F_&r;omztLr_$&fQCK1IYw78m8>L&MJ z;$8L^wgn^|qT+8k?LIeW0bxIx{S32P-Z63+L0g)s{N(=j3a_YtIrJ8$a_9?Doc*^p z0aKUNiXPIykCT?3c`*5PlK3Od$}blp$_pv z8bW6C+A*DcV!oj)2ZWv6UU?VMnubSH6yT@?orM=t;#0E$IsV!pqbVaQifYR-kLKtz zw0{f`TB|bGux9|{fqJ^Ij-(u9Tj>AJAK$`66SPgqciT=&!o;uG#GV{+%Od0M3trzv zh1Dpq8;`{{HhXzComGjAcAWX~EObXWguZg)bV~>iy3p5B8bdI$q>vtpiNFLgvKmdr4{y5 z!VG8RZ6;n+E@xDfK!>kGvEj}I8!!H3l#Oc;HP;}0y4$M70~^=;n6kzM#R5ACf4S}? zm2}}uyHUQ4npCK2cTpOW&CNzCiE6>zpATkIyg3%=s4&nc!3Bp%&sg_a-)rx6vX2oM z@yn4BlGh9Bn0-1{)+pwnHA1ef_Vj|rp*`}%TX22;^Ybs{Kh$|1NbEO;r?fH!>vr-z zM|RA!WJ7>a=hN39rNfsmz1SKMchuT-<9|qhxRiXU7~}q*SP|fQhfT6fUN_Yc$bH$K zfZvm)^YvPe3|7<3fs9cs&sYE&mnu6 zlAs16Ypkt(H1$r6q!Xiiu7$!cE8>mt1cWf3#1yu=y<;ck9+V=@-cEMzEz~Y=D_lHK zolS@rxHfy@>9g*Su8Uplbo)m}j1#pgrYkP#$Sg`qJ zlkx|=Q2j8}7@PUBNTZ!9p>HK(Bnk((H667A8CsZgn%^7ecqsQG2&Nd0wtjVGG{z29 zg5QFabWDV!spo+=6>6fj!edCth$FD!DB*~VH1*ZhCEGMl=ef8D_eBd;215xQw&B|s zU<#n?FIB=$xTTx+b_5-|R6%cX$qE*NCevdw39jp8x^WuC&R3L|XHCtk9aPb{e}gUx zNH>5EIp1MnL-z>}<1I4O60aDj8K3r->r$y1P0=QbU&v0z*MPbt(5Rizkj-dN+s9A- z%{t;3zWSc0cEW024}LUe`<&)_r5!TSIWus7Ay7{WmNRV3+a{CwlVqG%|9@3q$j`5L z3r%yZ?NsaaszcY;nI1cHII1VZe>?ge-?xji{d$+{EbxqKQkwP=?sxi~;G#c}R#!kG zefOoY^7YKu9;We|{P5^91mq)+I<|}0{B{Tr{^pNlF?kwyzZwk0$JpIT z%IA3w>xz&w`Ye#Niq%j87yiqZKjXL@2~{l&`}VPLwzZ`Qel_`#KF^t%19H!hwj5}d zZ@}CCzR1YP5Yb!$d6=YMt{s(NTXNPwwq_re69|s|={d8g@fI*O{4Fs5b=;M6!l}9=?==4{Ob1?}C0h6r z!JqE4V*}ix41a@%(HZ-E;h<*ij+LDi_${$=PUdka&?9n|h7rP1K9qJuVZ_Ez)DULp zBfI#RyC01yVf~S0TJXlP`!>r6ez7EO?p|5_`bHaaTwKwr>K&}ZTZg|sFP*U~E2IoB zzKrO_Ou`)};J)WWO&?WioPk&Vz>HbztNxQH9B?6ql{8lx7*7*;z8*-(9&mSZ7a=5o zAg%s*nu)yWb$yQd_zYZQ)KhOjp2fugQtcMx1_rT_xr}M$CedVq5mBYt#0bK1Otb`J zFWwRrhE3LMo16T8(b9k9CT_-f*!Ij1a^k8StM|a_&n(JpX^^rU^O11ZV-}Mhh3~M% 
zpv8KHgzVcz^10POQu^W3{p77)!|w?5>4A6kS+R+b&l5b~=Ub6B_cIpn`%{+np3~5f zf%;m-uHmw4J071>$oij;wX-mT)qkho;dJT?3KCsP1DvZe5D^Ox zyqf=>V2YYNDeG^fZ+4_SMB)b``2YgChhi>Y?-}{Ko)KXtv-=E$n7SLk4`@Uwzb`bt zxXx|Z!dj(^&mcU7q;ixP8KUhfFz_z5rpIoL&NN*&&NI2WK1DE3#`s$`P^KNL^mJ$V z@B;eNX!=P!sL5Vm9Vl@8kwu=^JEL_eR9YBD?I1rz_}GN;q^*&m1b_1Q&8M8qgcz~7 zL$)&H^Yo?;4meqIGRA-X3Mp{Kj%Qe`cos2Dz*gIDuUEv{i|3(BxauBvOCmyDPT(qd zS?f$(A;O#Uqtz}2DIhl8==HCVR7J=b%Q&`oB*_t)7y=ICT|gYl8WWH{dEI_o1xs`k zVXyP3oAzGXB33W2=w$KVN#NcnXH}L@>Qx960o;=!)yLmjdaj4y?2&#L{5}Rpbi~9!)FVOuO<=lBT*}o z@5vDfX7RcCqa9jtY_Oafws!9@C8=R#{a0{5=Y=1C0HRvnEsI8@=L}nc#6{5?{h2O7;jWD@x6R_IzaV1SSQW$vSmq;7rk2w%gAQpzGH&vQ%CFHGq zchWk~77u+One!_m)_S_8N04E3zFOZdOFa)H<}Ndt5{L#d4x0lDIxi!YOY5Sveb0k@1Om?(Jl6 z7|Za{@oenxjw6?&X;rgo3(?rTHhmz5rfo~D95)VqQ?T#?pSjVp+ZsltU~L%@KH7}i zlGFx`*=CAhW}Sm5+EE57Q0V+eIbAQAzs09VBXjcdG8vKBf!p@_61V{!9^RBvemavb z$zh&|f6&EnKb5j~27+IZUQZBUn6mnUsa~!gqJK5toj9-QbRzkGG(#2Bq4izscwPEg zL2EM>INV}aE21XIU_YfSRAtR9TzjEXcx_Iv)3qR0(ZodRj_mA8#`EyRvC%G=hgU^# zf5K*)h2)>fFx73t4?_Uu(?#9eFu4*gLCVM!Y6Pf`Zx7Z3;DhV=<6FgFHkE9Wf$M z80RPaAS9tW{^EZ78eHxpr7a{bp`ftn(n(0<7khBkcl%)E+(z8i}E>LHX8iYf>y=jEF0nfpsTLrm{Y2K%p4R9r^> z=+hvY(O2)2`b~N0EU~KekkI7Dq`P*<_sDQhAH$g1z0!AS(OYJ`gbE)Wt(7oszHdXd z%-_7uGaPt|Mp2EPtTUMv!M8(i9^U{EAw+o2aK%0V1sQo)flSD+;&vo~&N{m#8!i}x zhp<*x`*!d*{c6#`f7y?;Q5>M{)#yun&Rr3#0gs($J4;~na=i*P=)X3uQN)UM`zIs~ zazBM4;$9G^aX-yTX?s%T(lkv|2ot=8697Zg8lqH*oC7aXLqnNV{|YshMM>WJd;B*! 
zG?=mYe&H%D)t={{!RxoEWFL3MXZD$6Bf);DOOp{RZk*kD<-cS*#;&$emPqJ%uSq`x z*_v31)wYh+ke1r9Fp`;i8@{PIIz4NFGiZ5FsKBqz*wInvzufTZ$}1E#QhgNyyuqy^ zH8Ir+k10^CErIWaXbfY7D+2hAo9NqwanQR0PRocN&Q&y+Xp@Ez1M_U(3 zc_(J+Omc^H#r^Bnc$mtiFj>X2rGt8okrAa}mMxyW9jGB<#xK+~?~oG-J5N;L^M59d z32$pvQ4ba!ZOic7f&EmU&Xg{!fG+yFKnG{y)X&24cuTr-Qw!OrFjWaCQwN@(wUtf# zk_10*ZkK(7wqSrRjFQOV%airl8NDcdC3|7%Qw1*^vkg^*)q`1o z>?yTKO(;OlxjDc8kV?rBc!AG%fz$b%5El)kbph@-N_ut?z131P6IFpd3)y0IV;w~q+KIXf6?E@0UUiA_>^)>YAi()dT_OoGF5{Rb$_UKWoB zBhuQu@kFdNnbDiSzUug}h}V2~MzWU5f})A=&|OOQ*D=qAD&u$^#RAb|;P?vG$y7`8 z)LtDRM{gm#xpJjkSQF?|iEav*N>kv#nC0Q-nCkR#eNCjIWipeEkb=Lq^q>NK*vz9? z&LK_O%=hn>;W-sYaKBe*>(BpqhR6e(%5<+vYVn}|rhj&fg0cv*c%P67ioA@+iw)oRXfDfNGw^CYWHE zg4wSs6&~@rsCz`cX5N<-RY#fTd)vKpS&cEYOgt1GJye}JwETN-E1mhMv4t_Wl5=$a z3CS!K``=r-0W=PXo%9l_0&G`VuV!||*x+_T+Efb4WB zr2K{3WV1$Lw3WQzTJZZ~-Kl`!PE2{9Pjy8u{GE4i>CyXRg@|I34tsDr!<^jIw76w4_)TteL%6&Kbjo}1pXzkr_7^uJ z9h?xHD1)NZ{yqG%)IJw7YPdY6kxUXv;KqIc78Qq4c%EqOjMlL1wLh$OoZuu!?R2QE z`fCXX*BQ7fINVABNz76^ZECV?2Y1Fb#RMKPF?s?I{FGVp4`DFLhGI;o+w)?nwD ze;WG3;Q`(V{r<``-S}Q;ugTwNnM^PH^J{4?eI60Jdqca+G`d^e{v&RL9^JUvU{xCl zyf3$_;MKiJCEodFIoykFO^|v*bUyTz8H%c5F5n+S=uuXYG9VsWD}}2u!>#mo1j*!? 
z($X=m`Eb};q=2INpK!sfvLvv%I-+4%75Mm6w7 zHKivKWRQ@`Z-=nh`ZnQ&5EwP;b4;;o_tvFXn(_j#7CcimxN$m9w2`}%h<^sNhiKiO z{p8aa+x1()jVRzx@`?~2$fX%blZ>fCmK#kJ-tHAXoM>H$&_9d zfj-3)!i5fB@DLo^xM}miHM%l5ii9kVDeE2JHxOVCwu@fN_QqFc)BQ$kG<=3&EdN7~ zf8hOZ*x2_k@6T3Q?bmoD6y<50uCA_nZjWa*WOq*%|E&7$w~7;KZ=*`;b8vUP5a|*p zc83-6E9dBOq^xT`Vvo==rzqxEF1;8)8d!FR5tKa|;n>U_A;=M_t%-1(BaVaGtc{hG#3DJ@~%3ZXU|XNimhu6v8K&w7}Do z>!62Ie8kZ2g+Y`3gO@_hDnb_cs)cOkW|EM#ZRA)pR%v)jJpv(G&pVPcke);g4E{CLe ztavruu4^83q=PB9lH7a$iPN#dfbVsi5AfixTM530!S^ZOJ<*zC)nxi?)QfqBYj0-u z)ae`8;sTNDaej3wsPB!fn4%Y}HF*&j!4AjK>jjwt*tEd-zX~+M0W|SROEdPid_fn9 z>uzfbC)Nn&huqZ+Qm*4b65_Y=LCFdCX^mQhq;Zj-(G)owYzr_DTK+|Q=>DYIyB6Ov zB2}xla$Z<*Z8s~^d0|Vxe&J@EutmcAylpJJ9Oz}|wB=G;OMkel8$ZzV@s@&Vy(8qm z-rCFlnY9Q_vwd4VI${-R_fywo`9&+_N~ml4!wGdDN`b=2=zw`al+|rAOW^Y1VJMB= z0$1gUj(=Ir_P-NyfsBG7X7M8eh+bxxOT#3~c@q#O|Kn3#^wL)YTGum6?uiJ2q5Z;J z11#*M{js~q-^{i*`@n#o=d`Xbym9J3!(%b&!$+B*#rbAYjN-ang(!F^WtM;Ims2uu z;V0T4=UervPDgalmvdNBeTg*AGX@RnYPjcm@S2ByR)bRMc`s$LQpP)d+Yx*9ef~*H z&jfeLM!eLtUU8%)^QZAf>tE`Bhp3(D0Go#J{C!D6YpVGET4Rz{ogvx}1lFZGYO7zhv!=7x>j0m!P;=hGZDi^j#3YTF2;FO%Xt)Ct70O_(R(+Ae8UiRxp&SF z?jCJUzJoXFu<-9+wy*Vtz?)}~s{0$H4{8oq+q|{i7dxW5IBj_LNvE#l^u`B)nkG66 zR_Jhv#}z4+T?(IJJn^d~|L=dp0_>jujOm+7qrRGSw{%5K;%TEr0gHm4BT-ePnc)I~ z?=^5==uu@IoqO$WJ`U;{&MeSm*`O!Cg<5J66I%}Ju!%~qj5!e+Zj|Q*ZkwS+F`5~Q zYkw<#&9q`=k&>RVr3hs=O+U^MO0@C%v-W|5e%W-Pi!N`INN6)yKh)z;yfw2@*xX=3 zbxyhzI#e99y?kdwPuPnJu-t14Sw=-fp9vYL%IvK~#y)!bW2us98WPf<#Pit0VMmMZ>ogsr|fqg%}fTtR5pwmDjT@uu@;a@YdLov zh9_U>UM`bB1k+$hmK@LxmGSaI0wC9kqUb^w>7zkUg6C;o&;?`og6?|OI$R)P!~gmy z!)UvZRdQ)6AeWcl9t#-;c@!hMY>w}4!7wc|ETGQ&c>hG$N_&Kfd27Pt!z3B6)%+Jp zsWswfiHMvOW_CZ;U`$48Z&l*Uv63Wv+7osw5A1V`2HSYN=UxY_yjqF-*`3}beI4WG zYdPV&0(B=iKVml^_f47ynTf=`dn+^Kf$JiuY{$9-P2F3Fj_dhes{3~mQv1R8`%WA0 z*Xp87ykNPLr6__Kz^9Q;^!Bo(dy(l_S1m}~nGQ{g-!Ql#*JdZ>?cF;rLdQ(n|Klu@ z)qex3U8oUT_CYZ`u|A{5JPL{&G)W6tJy3_LgBt9CNfu%z#yF`=b8HqN%($|Ow8{3R z7vJ}Q1GffrdwXQ+O7;Tz5k}A|Axm`WTvkmrR;T4 
zu@p$2(?ly(+DL~2wz4oYl~kUF9y1R=u_B26j7aI;;Dqja3@9jvWPY2FdN>S148Q<0Nde|2uMJpxjxO5BNk*Li|WrlBFBO!VLLbc2wCU*)xW zmNhFHnt6}~(JzSoTxTYVN52)WsY#nYEH~FWeIR6db6aq)Z=$27$H9LPtBaA%Tl)zf z=H#J?zl*6fVcqkyR4}}=Rf2I$BiE{~$&O9>R=Csm#|MyMt%A2GL==H2c;rLLAPitigl%9lrqp>&wKQ3dY_57wn`>HmhqE zh+Iz6jl;6&1i%<3(sS06ZBuPQ91K?&6a7beQKQW^$=)a@Ew+`l&~)f2?oXgJs_73hs~Bw&kmKlo2KUMEF2B?eK40MU)Ks zb?p$=9Yo9eUzP|s>?NW?<1e6m#4We#K zsnWiUYQP7Xa$hP4DyWvMz02G`&6ZJ8hq@sg-LohOzR-6eurUNl#n(|)-lunE(xGO# zpTYUOFgkAcp1}J4kx?XH39^$_4K<8 zpuDUa&3|iis<9w`Rc3`H;s`HZ3vI5okH#+~VXtKlgj$aB-OZi5j5@m4W@LD>qmRyB z3G{w&SdIofiTeAsj`(!?(Odsw3~?36`wCMGs`WZ^Ka*G_LGZ!1!$KEKGTus$D6Ouu z(gu^Q!9D=~igkCckKP?6G18mc5-_HLKO1IZ0O&|KQNC!i$vf}JBS#Y&zAHmzd4V_7$qjlHZSX?3 zr`r^IlVp*BUxu4KD`A0Tuu)I8U|q;cWnUEjPJZU7`J52vA)dB-m*XpDEGG`k&Ef2x zZ7S7#L*$CfShn|G2T;>ppWOWBpJiI^@Q-4FMU#my) z4=0~cxlq4N##Qip-NSkRIUUF`K*-%Yq=`FChy~zz-;rD(kD;OZfS$TPR@zS750gC% zOeLKd%vr_FhmEJ6YR$&r41CLH?yo-}Z@-!$1Nzdc3zfRXiL3V0*vF=!P+D1=`HaL) zEg-LkXYR5B%(zyV3RCAu8pG&vUF+){iJChwvi_07KRPW4?OQ z;1IG#_ydDz}L8ZkJIWNF?ID8^CF+S)vf@KlgJ0qzSm+%(kmwFNBW9bpiUd#bWFu=Nf-5&QagUBWqWnZW4?%Ic37Y4tsAiv+LQ+NfJF?L-RxTw2(C{0RG{Tk(guGg6R#J0*8d~v9310rpDmtjY^$-c zu^XdF8r!z*#x~m6YK(4dZP-|CY@3a7_r1TrVQ1$%GtW7nlbjXKan0XO4{);>xJBN_ zTB>6)iYJ0$HbN!gFRYcPar?E>L$gQ23`vzik|gSt0J~Ae7s|g6wtkovQqrogC zSn|E?p$9`r6SQ?mm4C9Y2xZt61C`R!k?o@88*&S$)|D1=KPg;akp=fZLgvSeoE(Ei z#B$dWymV`#arn=f+OTUT8fG9|TTOB=4&0~?+!Bp7b1;++hHuE=c5G*tM|$$wDFS| zDtXttl=1*x=X1;VnS)?fUgj>lP!nuiEoD*GU3aePsd1lIq23S<8b&-c?i$C!;t5YF zJ^NxXpnzF{PxGGar9C7*Lv3!&0i;j zeiXLsMavG+EDr7@Gruj_P=3U(IdTM1meA5xa^@{8!-wH%RQ$zedOC#aV6Q|9q$cmi zlI#x*)(9LZIF+&rwL z!y0hf$Gxai*2yAa{}x$qhZ0iWecV1i9=89Jge^*0VUM^y-B|iRta}r3S)o4vJ1zG( zTdF7IFbl8RQJQ(1l_6)GlK`8IL3Cp5E?Z)yYhH2btx{Tm%MHj9a=F|S3j&Q@KYZRR z-L^EbK!VoY;Oze`-`)DBPv=QAW~|;BV(ma`BX~-=*TnwW9^_~&N$JzK3c5Ez&qRS z&f)j=CqAfqMwAkr{M9c`D8|Rc)VRWt;4r^J_BaJZpW?0xN9e8fYVRi6#P-<+OtIxe zIqMOqa~Q`TUnTEKH&#Xo>0g-Fe*a7=zcu3==X&{96IKWf=?9g(!he8DF8cP=A%%Q4 
z!~D$f63fl5N8b$y!Mz6`gEfEnRy#qJQgs;wohhV?Fu}#b->A4e6d9qd_)sh?K^B-^ z&o4Q=-Pa%h#~LS)juW~=`;a1;j}f-bg*2=1M-nG3{N~I?H%1Cfbv|%C=J*2_d(Qvd zLpb#zyh5RsP$%E8)77m%l!GuAXsZW5f;>IE+eK;Qrj? zM2wy0vT*9AtAG-hW%h`-c4|arT)fsoZ);lFpn}q`Um%XtN5!(IJEKOl(LN-tB@|`* zK=Z<^r5I!r>l(NVx*3#{(m8{W7_Onn*DA~6zBe!&H*ZPFzWgW9S_VIGvt-f}O!h4b zj~BTQgwJ7^Qv@on%M94jmkF1h+E-rW6Rr-0qTvMKnsN-C!ylx38(Pr0GLIyZst;l5 z;<{dK_21NG#XCbXN{k@}KX|lH;ZB^_4PSRg$b)L^l4e-W`oj>ICY=wb^3piZf9W@M zu{4;)+M2c-4aDAh*77ec#iG^LJXVI`#dYXhOVR^*$3Cbb+OjRlI6w%swsGZ}7f17% zJq{q^<-rr4s_dl6@3$LVh9^v>=BR@Fgo!Tv)Eou&?~jvBtz+X9vFq8I5?Yw z@UJv{86G#NRgd!ZtrTjATE+t{2^1cn8a-K>0IYA5B=V@H=xgnULhwzTbsTflqwyQF zmKX5dy_x#=?2T0~)!w>=OA}46R}p4vXyn_oI?7sJ37MowYn9;>Q2U+n3jH zbgs8SisekIC4%;jJz7G(AD0Vmr+{x z|DN1@sk$3gNFkYpdSO;?C_Kw;fS`K#{7*j|5#_ZsWGBfC2*Fnd2h_v*^QTcp5~#Xc z#;wvl*^blJ{nlXX0c%{N?^%QOAUVO`h`*6*>C2V!)SOJMr}H&=WVC8@ibfR*L;`TL zimI26=gTcwH!7LgoEmN#x*Q=9**fpbJ5f;(bfa|?`F{oxa=%v>+XC{svp*k65aCxp znqeU#Nmh2s*!iee9F=NO9J0LtE+h61?Kr`;yYDO~EfW$-;}d&eM6*_AV5+U0JmS|_ z+N~&H)07cSJV7+jg3gZ~Cc&O4Q%wX5J#9K_B*1UyX=B})bT9jc`HF}PUT83T*Iva- zT6a z7QzE^ayPEFgFm=qPI7wN{F5<)Fh!f4dJe)!;Gr?t6C#{U8>z_uIA_R>;3{S01j zCgO%ZH_+|WX@D~Kg6?5-gpEM@VzX_LEF=p=Leb8vq@k;Xo#AlFPglVQPHYZ~EnC5# zEb`2V=UZ;Z!VjnNUO^&p3)~5pm=Dbt^`bqo?#u zG(SN852Q0CQGsMM%!ze8nc3$~`0HH^w?;1eB2qegkUXOjb6D+^*DIon^mg}HsZmQD zU!xl#35#VEMaRN5LMj1?1$2uMG6H?9bT{{LMlj?D{_n#eSMVwSD&2ZAK8iLxH$C-f zwex$j2J{oT#)C#UZ37E4+g_dw*zNQ&@a)MSmkzBe8XTZb6?tbE#W+t|*@U*qVQZ;W zwbYMO(y61dEsLEVUB*hM%E(tv($#w>{q$=*3S=RbR@_>aS^pPy*vgFI8xmG$$G>yN z0_Xy`0;B@80#zTRxCxrX2z+cUr*gMd+(xb|KsvK-F`_(17HkwMQRx17Mm5FjZ~}4l z*d?SmgM3#lXse=+)i1rzydO!Q;R0W@`kW8d9?vn?8vziM_U=?3@po%e(=lZ|O?Y(F z=88{9C@jnhQO{9b6EF zvWYSOtfzSS?m(;$$4=%vv zXb*jn0Oe8G>K7GMd2_I*NY9fr0X@yJHP1gn4w6J#?FHEJnm?|(!^FVpmr_0uMeBb| z7VcWpLKFLQGg)l#3Mo?v)aCdq@GV_@jag_k*tg93;Gq$*z6g%)myM~_Gd4)Bc&<4% z3qij;U>zxk64KG7lXp~sc3?7TU*2v-~ zh2?jThhn^ly*&@LyudsNT3Fq)WKXaz8BhHf-HWp~H$f^VSeN<5r!8az8@bM0tH?x|%?c$*OJQ?xXK zIq>H<6G`W_VhW`FX{%*wk)4ZlEfP;qoX1k2ztzcgg4?@a 
zCES0NC+NNRCy)i!#_M!-R-W@i_1BnS*mUG{^fZ$Y6<4(RaS{ma>SjgS zP^y^2YdV(7c(mEu+c4NnQPh(aY@AP&{;3*{Z~Vak}SLY-y6Gf&5ol zY9u3M`W152ZbQvB(o7_U$xJJkfTOS+!(`J|hsgI%`PdbYhrawLRu;cax8ii|Z1}-4 zR;X5XQKM&!a6vYaPUs!$dt7M#oY`x+Enj~5D`1;v9wkPG799|fctI;yZNQ~Xqf1

>G^C+W)osTn>_1IN@Y~Q*%9@%8#UR7%|LKNPzYjUO$>Rq^>%wxD1h>6P zsAVerRFH?kvpD;Ud9|W3Dwi|{509k#RG1s097mkVW)UIF?N#=tGWr;RXXH}167ePR z3oYjXmo@k{1;lPCP>@n$k5)!wF_umHpkOBP!%R_ob+Qo9mrx#gBsiGy91S8mmR9MixOE-147$>At9*T7glen zriZ_8_+O|rL^Fx+)8+AijczO&f1c4A<#L(rZ9=xf>R25uw-RHEr+Q7ei&dN_EP4&; zpjA3L6KW82f6)6=`Iwd8!-e5~?4B1~oIlU%+eWuvo9}JeH#!<-e`$ZMZLBPD5)^2j zT{RoZ>)YFbx0eUfzna^>7wZhG&GZllt2HV^yL@kpWmD*u`h}jIRBw#wfs_rnNG>W+ z1%n`4Z_1^9@Ej98oPZ_$R~d(rcNFRa9c<1 zIpp61LyuTyN)RXxzc{Q{nX6{HWn37gF-6GT45-ZuM~~H8;=2nKjP@liz~k@Z-l(YQ zs}U+on2ht|wDbbny2$TUW!^OIE58tMZeSyGn0tEAbuiGr6{)Gi@W7?=+L#nZBv01; zK}Ag$xhcwxt7kICXTNgih0&iU;{O5evur0Tqw)H4E(_Ph<;l+F)}0`Q$c3N8@x{Y^ zz1MgMt2^_gy5UFGxgyW1sLvUa;rh{y7j=G!J4DApn_^9My8t)K^$Vhy@6Kc5@X}-o zwyi}j{I)tc#*)OUu$}MBFVilj#anPE`6oteFx@*|Zu#c`C6k888uvv;z6x(rlNT?mz&zSo>%zCjVSi!I!8SIzj`c+xMQXZwOLV3C-(}gXKxzphH5jNT`A#qo zG9A!RK(?>W<#(09!uK=XmcdRp?DR67%KdyaR~knJ-hT zK$iWzFe}E{?qua_en5?vI{AVW7<3OYGN}xono}eQm4hYJm=MKH%h>Rv9gJ4R%1_{* zHM!ZVnnD;OJjEB&bR&NL^dZH${kION_9CiCSG;Ym2HKAWS|WCTy1 zp2isuBpa^oAcupgsiG}LZUHj|QZ5n=VCz9^JhJ~P=E{^!2L#b&O{%5iSX&I?W_uDE zOn+iNUujl%GDXP38)lJvueP8y@FZ_bN^!@?dZioN^X9~+6QyU2%d14HBWwkqaiDHY z4P@dGumDfU_V));hqGYCIP$JXq#_wXrs~DhiAiNeG8w3Y`7=X0qP zA#T?Vt;m za8(4Tg1rQPeZ9eA)=4v0GW0a3p#tPwpLA{Le?ek&wwcs-zN(Rs45j)Pr3k)8H{#HE zN_=aE(nqxw-Dqzzu5U2LVC+Jwv>N}xUG<kyK_+7-nZP?7lzOhb#*{%5W^{cBX9*D^8R5QPU&jd>Y$-asM zb6~Df?KnhiWc{efmA5dF*Mr&I*H@6XetB z)F-`&*~K(3f^J4=*K}^FwUjU7@P*we;)J}X;@E_(t?fkIS+QO9pIZY?%&^m@d(M+u zcIBw&XDm8`G6ROQ#twf+P;lulBPeoieVsn&J6p~MefbeC2Ty$NI|IfM zE2M@Sx{#)Q~AM9o^&1S8HpU)8W7)-BKhT2H)bcL`XafPL+N z4+%1AG~CO{AjWVu8F)e?tapXInHsC^X9?4G1m2)uZj;W1-!znYmWBmu*5z;FnT(_z zf%Za^)1KCAsu(QLeUb!y+m^hM8*jRCplAPXNqGY|^2ficznfdU;-a;KR)TCE2gAQ%?0vAfHLr1GOKGtXgIaoKBW|k z5YT3AJDZKU7ZEG_6KPj=o>&{zG>5F9q1~lge~9az-|(mopRf7DrytDxIVK7k<|o4U zrxEA)B^b_W3x%YAepe zPTi0+9ohSk@iCtbzRV7fa5C{gL-Fn=Qn!tv>emMaUl4aN9JIUhx4+u^uhjC}EhI%Q zz#f@)(^uq5mL`3%z6qzy2Pw3MGaw%axdES*Dq=#{cLy3bKbAVtuNbOWpN~jG@8}f2 
z>3@qVec140n~R?loP$+`sVEE%MiYm_`+}7`BN%Q37YK(!MIi-Cllzg+_p;NltjJ4= zM0v5_X&jlgoW=KR_&Se+F(Oz846$m<+GJ;?p7P=T4WgPa8ie<% zck{bVNfTsP)BrrXU<)$*=bsKsAn4722SbK3uZZ9w8T_}!KDlrnCJUuuDw{zoF7|=7 zfc5XXrX7g-Lr0C^s~iKBwSWxz^vi%R160A5atRts`pv=2iEkFJ@$G?bTNyBfAg^F} zrb=U3#|Ri&6H}iV{z6k(Rus=EBjoebafv+nKC1x13n#SRpf?wG%v*$&f0s5b<&%AT zx)x-&B)=vvjp0NclHgPeNG!OS*Umkx^4*EeQpF-VN8VoaKdZ^50XQuig5_QxDc_7> z@UYYdR*Ss*p0O*zV=yYelz%sFo3F%25NIGpT9co1dDDff>W7p?rY@i45G%Atgvb|$+kaNq#}bg051_w&pukg)pB0X;nD(s0r~K{2<4*8o zG_oPiPTp80M5&LnwnR-K63JlE=&TOq@N1<@Uv$81#K$yL;~<@AWeH=Ic*&#K27s^K z;rWmb&rDAG{1qm>oYtcrqh`=477Ju%1a=uzz;3trjh#1{FT{ZEVGK=|(Q;k(oa z9thhu0r^+l)|4U51zm)_I=LpUEC_5Ux3c6O!nQ0;8G`ur8~xIHYsARS`JJh06BPj8 zPRHv{>vpTR@TI>k4o-G9VI$7K9Y-P8-B27BCv^$Mc%Jv-5JO#NSShZHxY$yAKHf~9 zKB%}Pq}xO$Za96t6~B0?;J4d^dl#>@3103pmS3M-+n^g~>iKu-zC_YKze@9oMPGcL5_#+n;t?1T0cbenF}sL6M#DeD7q;2m3g))T$D^dNhW-e!|3^ z@y(9;R@weZc-1=eH83QY5FXnU+dyE6i;D0x#wzH044~{#Z%Uoj<6+6vAfF!2wyG0-~>?OJKV4gT(ucrKcS%Hbd9X_nkfBsLR@k6+% z@fV)+M^lvSUPFRF$rOs&tH+CRBIpBYD4q-+eMVA&kFO=jkGJD^ef#6-T1ye}kr8yJ zcc!j?=YE*SNe`)LgyB8w&PYV3fANSZ^nI;G4cijn3_k8LZwXmY+h`+S7 zg=y$a?m@+OdAu^66gu!b0!_Ki{@VdZpy++aTs?MOk+H(KjK(M*c!OXsn)@c1|FQKv z7AwJj{&sgHlc_q0&uUP?!!FWr)C)&R^# zYPs-0DvzIO_l(rwYm`5DEmpT1sAqRYVqN#eqzO4p7RPMUX2gOV)7LBZl2to*7fa^w z_^enJ?6O;<-^Iq$)5*mXNw(?Se+J8mr&$@p(rxc=Qm7Q!&Jy_ImLcgJQA)Vf9w0}+ zes#&nC^Bo0tL>NZ5gS@F&+Mg^7o34*hT{RkGT#``oia0C+EAFs{3av%kty*ajn#EL zG3~O=9Yr;7n)?YRThVu7LaFcvWf)$S?-U72#p`<~oJKKwAs}QdV?+zWkVdj;{F#uC z(M*IH;eG%4c2a3`N)g)*`t@hhe0XfT1))mtlTGG!uOoHDh8jW`90;wtjJ=@>?6NKH z6@iXcqcX*9Z81e**Y? 
zI$C_wRZa;Pl?I#Ea{PEUGr^SRFj%T$pPK%J;s4PMr(-Z`qjNMMQSN8W!t%MrMgvuf zM~70S-RnHNg(jQNIUk)wYPr>XBrzKzwn(Z}FE`sCOMA}qNQp)@q5N8uhMhH1_a&Lr z|IuYqh)#vlhSH~a&g6P$=o4tr3)0}gD#Nf$5X;P;1Rjqg6@?sLG-zn(Okuy4iA{Gh zQ{H^##v8UFPY|~ME&N5kK%!^$ru3hf(yqk{!h?VYehwK}cd0=_DeRgVtCt!?KHT7w zW|gn>5�|)=hk9tBT*KH_QS=Tv6l0!@RkM*ukgtp)Mc8o@-qJr1WgCri=8@ODi*x zEL1-8-UlGW@FwNnl+VIs8ntGhL7Wbi8K2VG`^VCXoU;{6#Ej{%kP><~~ zQc52ehF8-56o8f$YI8{s;?hd`24W2Q!DpkKYcrnQwU0g0Ghu83739A`uSKLeo^E1R zT)$MHzh+E1l6#NTnM9y3O>~Q!ZiY8owrzftHgMxlq%~ATgH^B+iMiDon4ze`5C>$V z5XH*1XsEyhm!6g~f-X_QF+wT9vfb`TE;`|WNl#(&Yrax>A%ZvGQoblF=R&Qf+un;=gOpB_oA z8uA(3-f2Hl?;!qjpVN7HRWz-mF){mM42vEJ{)7h7l8SKhaVprM1(LFoT%PZaLmzq; zO?|0vH`L3Owwo7?<|oXxi^rwjp8xUO^3(Mpi)&l(NaZs~_aXl={PG>bQ~}c$_!k3X zPJVG&Pqpoj(gehe<1Y3ihm7s(rhtkINb%qW@t30&suD3%C$RbrusEc@!vf>OCEXFT zaXqWblgpzJEvkyQQ)SA*ILgTs;MM036xUpQnU`42yJi)t++HHqykyH-y&l^td%1LBad2;)N2uD0XV# zc~sj%FzOqZ6_ov7sF5U?Gd|Uys*o;0?oW;oBoh366Fur}y^4Xq8j4eCESRPwMQ78x zXIVe}#q-FjGUcb*BI?d}rSJ<9su_ZG->TvXAYS+}a4_2b_a51lV23b?!;i(e>9}l3 zY2h^*$Uo<~hWI`VQ3{%iM5$q-&^+X-z$8lX8HGn`BhXI(tEY0COn5AjTIStQ6g_t{ z=8&LSdYj|;`?sHQR$%{M^X>-A^X}%cP9FgS0Op*-qO*ms4uEea2Ta9CR{M3o*}G-6 z9LQF%ddXM*S8YgI3>E6b0sCp%>?VCs3fbJUhuA-rc@_II?{af)c1MyHwWJ|+Gnio< zSjp8SlZqIfc@qYyI5#-ptRwvJ;XG_2Nm?+ibYCo13*>5Fds_)A_UEYSE^kBE3KVRE z*+g?noK1LD0U1+W(yNE!Vuh;r8`&#ypkM~CW7=hq2!_LsW#jRcr6^$ zniPpoE2*6?!=vmGxL6%-F)p*+brNFU#66FkP?!D$!SK6|qf`=K)O5c#*Y-f=xHS|e z;)LwjV_{F1|7ftB3`g*^)8DoxisU6J7BD_M^cg9m?g|I6NDqMT%~qwQqrU+Zig@A{ zOmF{If~o?cU(YrXyzhS(pOwPzWd^6k44hYN$iW)(nM6Pea0g_^??usMwSOV%ef@I2 z>3WdFwG`m$9sM+TfRa=H53EJ<^bhg<^+kDPeO4^!1MT%DZhxdnwr}C?SghX6>7|KJ zfikW01l3Vq6yw>S{pYzA^jLL1POzGesTUIP)W1~^`97VBrtz1Y=%4J=ugIH8pj%&& zB!xaI2@>6x`s95Q#qb}|Gx`$YxAk~j`#)xGOSG_!&J@{lY^yY%*edVu9V4|zfL~;$skSz|HCO#SEmzG$5eG(af&-d0#rK9!0xH_NsolphwH6E58D^_=O2dl zx&^oqnc6a%9_;U0`Tc0&=?OKjKm-xwzJ%R$_z&CC3IH(GIjfFZWe;YotBIkQh)^6^ z-Gk6HpQ3+R*bE$w^htsJm@>eC)u`)nv`iQpu73!k#PF2Ev|29bwR*bQuh6Rf#H3YI zVaQe8i^`OER|OG*1+63G0V{vN*!N+TGsVw@!S5dp?Em4JPh4n2CNCDmU?`J)=irw 
z=C;W7b@R+cODxNuAcKa&CHUu}xi7?W$}N<)?4m5;>W{m{<8Z>=h-5>S^%t~#F~R$X z%(gBg5EUi&DCY}O6xr`-%3xYt9ga`NVq2%Ai#UjY2v?ywg% z`f&)ouV>Bh;i1zN-@PGiky^vw&#P-KVwG;GLdFhmb2$7C9Xy`9)T7Pq zYD=3{f@}lg_4wpq(ru^#Y0dT@lUWjlKj;!xMb6P0H*r9(EP&HXss;6{a^IPkh?@A@ z^HW9*+gg>j#yfqiXxp111IDvzO01r><}^Xxe`gojrjS$kG~cBQ=-xItQ0{{;yZa8O z=!F=oV;YH}=vS-^7l;ZDQ^*Y#0>i|gp;n0rRe6;@vS^Z&ey9>@6{zagMWZzWhFH^y zgu#~VP;Y#_EYb2EQp5O`pUfeT^H~~X-gXzi9fDUWC~ZB?X40!35RnFuw%r&Z;l6m5 z4qpn3+;1)EtT%O_SFlSV#p|G-C+66OH%WHSpXjT|f>0+N7X9!|h*HRiB9TqLUqYBR z_Z5{c{y|32E8Jl(O|7H^55k{8*!#2nqc;fxpt!q;1JXB8Q#TsD(fwlZzHR9UR%Bib zKnNO69JZM>b(Pm?t)f3f_P4l-nApIJ$nz0(lieTW1Y*8nV&|@+Aikh?fB*Ys>rWs; zDwQIygMW+0i=VPt^jl0I9&18g2SQ{>C^m%8e`*bpB4h3jC(IvMzv63yz??H*5?1mc zkO*@KZ2gl1A?A>`+Blc}UuKrZhvRvfPUm<+4vC!j^QC%882rFP2;%DcymiwK4SQT% z5i4Li!I!vzSQD2T@_(qgg2^n+$5Zsg`%V~w`b&^2@3bKD2$n<26|Y99>zY0^iFBwe z&AI6L@rM>RQ0eCdoeUp|vxg8(ZZakwzDC~<7@&T(m;=Vt2J+BU|)01{+H5`oI ze`~?K$}mv_(B!4!;%!z&w|aMz{`-_Jy06QLu{*C!3B*+?PsEE|QY24AwC7^a7K;@N zQiFX>_>8;3a&jeWYq#(El_emxD6d(a>&4DOoFxcHDj?P*CVRB17FGwZX-uiltcmI4 zHZJO;G$F}2KLEaUEN$@Z@P+b`7}{4gHJ3*t^~+QW6}$b2FDH-qEKlTK7OZ0pO!oFZ zHndOy6y`-$d76M}c!kR4{>MHa-H{9m^eeesciz2vTh?g6W;AZ6jSd4tL8cHac?(-2 z`uTiebnpfCMDXGF~F^sAg(r*=7MG*f> zbvKOZ+q&%i`1{T@nZqX&0pY;oyjK6}se%i7cAsYfV;1xmYIs!oe7rr>d!6gmX2me* zYztPR>2`nPS!s1nx>)Z#^`MNWk#~Ee?dB~V({|A3Q8BCKs!AxBq?I;$g zOJG{ZMGyL;x_2Q+iF9;KWV}|<5VG*z!?7m`@Gfw$uHIt z{AYM0CxqxGwSYQ&8s!5s+VVwYaAq35@Ncf%C_ii7h?_7YZ=fhz4v=~LmrIeU=Gw-U zV>oFE1_{dGEzTLk!f)(5gUtc=uKQPz8~@cgf>tU^R(OJRgjngt3xpdk%gdvSs1w7- zm=DiPWNon!OgqNI49jFbqBmO|iyPzd;bjR(8~@Fmuwy;VsK98ypJlqoC|$Ip)a4b= z`<8R&NS-9kE1|2)NZ*eaplyRJ&4N7H^Zpf!d~uGZzm{p=^`o{OQ3OPek(J65`Zk0U zk$IiT5F^d+RabIUMeobX1B@q>Ry4uFQjkYNWS|J9tsbzZ)eyYanX;X%YqD>&LPq(y zv2+n1egv5wVhy>Oy_LvErhoOu_c`&fm&!^v*)u>jCZy_$xi~@V!s=v^eHz#b60#0)MY0QDtOW6=Z0Ky5vDH(r^s7OZjTk0e9T~sUYzjdkW6GL+@*v>KVy;r$;Y8^R{`VofWdTRg!!C zZQY(y3Hk*{9^{Q>0^N*5AP%R$djEdHd@jNCZ*%wO%SP>xo>}fmpJ+{usyx1euTkNW z;Hrzx4)Fy5;VlAQuSZsN3xXqJ;j%sIu*x6W}U>WVc 
zO&wUOigN(8bZmIV1hLK#wNC!lR#Q`{JX#Gk0&=rd!MVi8y-Ke5HD^naos-1ohO?;^ zE0jgv5m0q2JmG$AfBxTZe@w zrGo>KP)|{b>lyd9KM=N{uJP&EOq;x1H{5NsC}LqnnVL`oXfcUKRS&>R@rT#9-rfEy z06lLZ$A5Srp7a<1+Ks-(;7XRhQQrv9_IY5JnWW2xgHCE|F=t**qyw+l*-T0cg405& za8)tA1IVYZ^?Ebp%s&WFC{psL;DfeT;F#BYao0sJ6Ahr-CB$NZ#Aqmr;G=RF)8B|9f}$f~)dcd)xo9+Rb;ZRv+a-237K^r@PO&m!m4) zlI~=JRRn)q_2;dUFzfA8V!hba*IL*RWW^4G!Ozxsta9XS1j{xi`AbEn?H@yOYlxXKyJ3Q6x?u){#yrphJkjOUtShTb z8cyUG`+~zVe-dM6DhxUAoZe8ac>VD0{T7GVoTtLS>S}#Gb{U1`T?#TKH+uC!w3}=w zflqOo!bLd!rupXtU0zdd_5F zD0bSP?I)>0=U67_M|v zXEK=91qZwfqEzvH+Rxyj$l|m_{#&D4Qd@{9lFn)v3$dr2Zu+w}h$l~2dz`YHl&Y3U zKWA}sa2+_Q%KlfU9H)P4=TE4shDVzMO0r|^n6-_11G^_4IJaNEsvE!oFRv0;XtyElChvIG6GCQT2*=ct+Wa8FIvrB_IDn8ejjeku;FD@ zjc|jNm(HFf$l^9bL5v=LSu73q_V!bpZqQDeG1WMLCxaQG3)4P zrd1iVK46*1LFByuv({$nPW|MuEsqK*G!DN%i~w$w)i8d`xp!706Ct|K*aPX1cJ>H+~O?VEqNy3{;!POBsafsCgSiToNw^==7rF;!G6C*KoRq7#PweAt3^ zspEEpbbOWi9q)7W)3T;uSYM zI00H9RK3Yw(zd7rVgt zym%fIg%B4A5-$ZA(>uhk0-x*JV1Rc-MI?lzCJ|-k9?9h&sL)6s4F<5378^2G5){pL zf0zuN0O=4hvW?h)v*jJ=sF)BMNH<92xI>r@S(wZ}75d*!@@4x@uv^DVm=vR4@rg7r z)P<&Da)H2%s?C}b4A~N@h~D|!z7udNu$@#mC?+@7)F7*AdPJdSh;fftS)#-&`u>Q7 zJ3jMEBT6VZm*j~;3?>aBNcw}y{o|MZClJZ=1$7r+2jjEX?^vqt6klTu)&85r@SD2Sn<->R zJgaETmgYd$S0QJ72#6%ggzcu%he{fT`14%i4b_<~m+0Z;ck?_0#o+j$+jvAfeUwa2 zsTahdHlobg3YE+Q)@J!$S_V_Ed(0>|1i!H*r(oo~nd2{_zcQiv?2tXBKmm18`_+;~ z{DTgo?ZO6Gi@xM99efWmJhuT0kI2cje_2R->@}eu;O}LwM#y#h>5slB7fJ-rReZO^ zmMa81yeB>*nqsKd7{rJT1x1KD%sXBY6Ty7K6Vcx=mB``(Lp1To|FZSJcftPwVVn0D z>22=V>KJ^+v(lCe_hzE)7t~BKcdF&qnG+BN zyyIWqg8RgQJK!{kmpo8FAO(%%Z>SY> zdoz%@LTeJhu<`ilQn!`6O|c8(y8jEv|2QrV>wp&Q7^#?J6}i$CBf%@8$Gu3UGfj7{ zUe&$tvC&!vZi@w|z;^WlO>W<)kb$?dIU>~{k#aCSr**fzdM3+a6cVPT+x!~f%fHtl z&YUONM4WNLKxr||6hE(2-^c(nXS!oSEN%o8XAz$49>L0#?Zwb#EDbtAtu&<=T1)Kh zw;S1OqkUYTMk_mJhZOSD$X0MsDVOW*J{!gtH zWw#m1yWN+YV$%U3%WSK3usyo71aV(^E zK@!LDl0m8BVek+)FuFk3r#v48AMUD*NK~R?QvV9M9cO=0>LS zFW2`-A83=Z>E_uIGc-F}ANRXs|BSg+kkA8P83a|O4Q$qe@w!1{W@on6de;5;??&Mr z5b?_=9`#YGKFYN`jVMe!H)D$kqIac0<=ccaL%eJZ#%N{yq~T>q=Gy)l)>`{FU6}Nv 
zvX@q8yS|&Qvurgyo17a z#O6#M93f+EuIXAciouA;%WKVL?4$2?*yIA_x1TOK0H0q3sVyC+tZ024fE2kBOeu(S zUD1h4M$Ocht|J#03Frr%{5|lauhbB2z=e3Z*VZ$mZak^#D!0n_e<+sY03xn$m%FzqPn| zmNNgmZL(jmaf}cvxnBOgZjyS7wiis`wrK#(0wDkqebYnu;o`(L;)ke{dEL`=7h&Xy ztA%*=8LwYuczd#3dj3Is4z?|wmuSjDKJwoa_D+f!(p6yApijA*T9=)2=x0UF-Nt;p zddp!blI0tsz~`r*@XQVffOH%BlHV4hE(eIu+`_EuJ$~vi>6!@Ig^M>A4A0Bf3Uk}_C7QBTyqT-6vVxE<**yx9mc;(bW#nk=uiS&{?jx4E>_NHNC|#NomLkT z7#KK^8o<><6NKJ}smUV|FD~YE?=f2uRmWv%8XRK3P<5a-0E`^{+%NU5zcF?H1$9sM(lzlwjFkhT;;t8c1J~2YogclsOD2eg_laiIq5#*{<9PYK zFdkU&s~7CT{Z}4|-9QxK{r}$o|1Kr#s^!eVCwAeqZ)aJD+_{VDZ7Yg6yA^bWXZI{> zt+e;d52OX+2Of&-6!wtiSusRkeQu71Pm4xFB7O}M+5MFd<<>Jf#~Kf&<{>rPn)!CN z^rQ9GV-g*a!Z7@`#!bL8AwmWUrW<48cJlkN8b=3J*s+<^LzBOE^EZ!Ep=yM&(H~o$ za-lnKlmg#P#TQgdqLo>v7sZs*25ahxWma4D`Mpr1t~a07@-c@=O1>Iv)6k4?EAz?f zGS7vh2jjgW&=y!)jm=O|ch6^IqIJ_9k&MIF>X$P;hWk}iw1tL*plQ+;rXnndgYjUE)+!HiOLyIlNc7LyJQ6rxd-nW&(&yE zk%2VY*Y0pqT7TuihzVj&-?oJ3ygU+TOgatz{*tKvdz?mSx{_pvF|N&3Hidjc?xlhN zL{a)4M!g+H|3NvO3G-F%c_du?Taf&O{K_+Juh@9wgrZMQ(zR%c(N+s5{McY7`o@mN zV)`2B-o1jI)?~m?Y~eIn$dX%sm(SF_Mvu1{W`<<^@z^taG2<>t{BGg5c5B>d`>1Ob zBLNP^C~g1u_xNU7m54+?1qh8Cb4W45=LbijeYJku54Rg5Yb{kl3_uEZ;0mi%!uCe#1Dd(q0$&ty3qp>7h!d zf&zgD{<@c7$|C5Upa|3Xm$6`O&GUj^-|5t!8qrH{?B2%^n0^VQTaKiXfE!y=6#F$g z?I+O43m5doA(aEesChZXSasQ7Lwk8ZbHz*-QE|M2r*C5Kb6j8&FGwTmla`H+@mAwk zbmQonPvW7WIpvEV4MIar(!wcbZ&Sz>Q3y8Ce)YOgDID+JK(+WxIBKGQR@KXC)%TA=+d>FIAol1mq?2s5FBIy~m@ zDmo?M*S|*m6k+ob;VNH#<#~;T+WZuSqJB=}tWbrAk**-Fh@}Q8w!N`)bZr1jM4$#C z(z$)V&1*Kyvz3bW0_aA_Hc5(4GeA|jd?eQAu#3q|_JY>)}*NwOo-RWh)7Scdk`Y2CLaDtA3XgdqV+xFEO7iPTen!7Fxj!7Vl#5^^lAe` z^O6nhNne2Dxss2MJ_o%AT>_lbvzKOlev_b#K61k|iC&qrPuX{9!Hj9g= z-Z(`7N&8kp%U-=eGWXXXac8nKuP--eM$dzC5TM<9J~x}fCcgGV{41^a<82;ffo0Vt zW#F0D^Tk4)?gC&$j&Z=)sUg9=k{QV8lQT^@pr#Bc)$$UxuG&X)n)X7PhWEI6$86wb zGo`*?RAHD_vG{gmDVO!zMezI$`!H?ub&&|uQMyX>1PyLg*;AhejV=& z>EWe@4?OhrF`K;^P9R94886uH1LL5D(TV?12bQoXyqhF31Y^5%hUz!6UT~D$qucN0 z0N>oI5xu3(TTsPNDA-xmH{|a|jNH}%lud2i*;A*p)I`*v+a(Gfq+RHz@Tq1{6$PGT 
zo0*Z7wc>2N9<*8H_`(2}l)F>h@Rak8z(3`>By6X`7XyCG2p+-0PG5Vh{bxawq^W8; zVO>KBzi^^6j3YM;&NiVvPJ&3+dHEXy*)^i}B7+`%dRZ+i<*TGxE$Klo7w8*D?buhy zXf|wc=@(L5OxC7M2p9dF&PtO`dR#tcnhv)%LhMM6eNQakz^uUrN#Hl4!67d8EA*L; z`i2Pi9#@E~wVn5RFms@zSFe2B()e~4`oQx|mBwV7W&g~GCn=y3(Nzf+`nv$JA@#4# zqT$1jx>Y&l&rIEiqVb^^FCuujH^*rrSvO$RDelN+>|N75F|k3up0!SF z7y~m;i58C{A??i3Cr`A^CZQIMyL*qSYxH>w2}=VNNZ0lUd*I--=y4a%Jm(->@VmiC z7!8wN?&>jtg!%jzJrz_s8AANHq*2U_=GcQ9x5G4)x{*7yek7F!1|Bbntg!<>Q8{!h zU3PlCc|&@^DE~tHOWU_Dx@L#|w4p#jtcJLdW(+Bb>Yoe_Zc*Pi7U9$dF zdPG*Qz+0FNND%5kJS042p7Is=kbIJ#5$iboexwW*ZybuKQfCz{SuYhBh`5Bg;e2WW3Kf$SfyvVxnoyAL2a;z=|lHQX9C#k9P*-g?jnn{r(halb73 zne6w3qOj^3258VJuWkG>o6I)TL_8#%&5+Q5-pf^+X7Y2ZO z(un8N-zCi{MsUOxP`s^W;FQm;Q{D|E1Zfnb?R&qwe_fDtg)cs*4yRB6K?i07(mPqt z$1@Gk${&Aoxb+k4=U_G>TMm?>HP*TvxUVigcObK>%g)V3`MP;bBE!hlltb`jFw~MY5Pxt`b$b+-ak;($b7OP7fIb5~@io ztKED-<446UCFaU__B>ggh6}+ypZk2)I$>qgM0km!=1>U(uFUDa{H}_ z{#$hj!$qsFs03b)lrm<(P#vclTo5|2FZ|!NETz((SH1%Mm?_e9S~%sqU9L&6={ZHd z;J@?vNvC_Xa%JIfUn`a4Vb`_=RZ|XnzY+LW1qtcb@3eB<_!uJ79euCeG&#tE z56!+y_}8}SaBltU_5Y?erdNacUxe!!W2y?(ltaTXX_Wd~rgmd@p8i^LDbqxIujzc> zdf49Bx?VF_7C9&p3+P>9vF5wDjq7@zDrTFO&BQN7;=(S&F2;dVwm+F`+LkpGelB$$ z+Chf=`v)m!Y@EH}el=1Kw(Mlewz-KEcTYEy;|EW;Yw_S*jo}W`$xH>fWRi!+$`-`; zW&(h<1U(*=YIeLo359+4FWi16M%Oo)*b6auGF=2=t;!6NR7mP!-+u-xOFyukdRlqj zNb}8dgo;~MoFT%q#C9Wnf-LlST6gtjJ$RmV8&cuCs#it$BQReUxjBe1UL32Y9iS}jc?pStMkUiS6YPj~KDn@;vLbFo}aTpi>-Ss;L6)KRgEAbvwnCY^- z=*;^aR2A{aImPRs!53$|r%LhRg4CAYN@?I$8TRUDQI(_fUPYL3ovw$J{xWByk^87uC)QV_-B@QCeV9)Hxf{j^E|@^loEu zm9n3_v1NQv%uK)`CM?QpJN?(@L-p1@&QO7q|E8OC=}Pkj%O!~cu~(b!=I;c5_J48R zLBDh*JCv9&_WXz>j8i?(?p&OA@ynz1+)givG!b9s=_%gcgvNqq%F;ODhmy&knbxeV z&d93_)jt8KZb$BwVT!{1(hu#VdQo;YzY0o=fIC`gn+CT6@2FVehU$Xtspbv;?`ab^ z?$Tz6pyf{WtR42qiGbv1789trRqaV}ABCKk9!l>=~7SXST4e=^aRqeg^Bg??nWPyCH?)&j&+Gpj60urxByUM#onQ zy7&!vdjVo0YO#NnoHQ(%!lhq z{_my5x8CP-#gjkvW&q#uRrlI&LSc<}2K0sTzl)tlc<;uRH1k%pgs;)NBl&5>50_>^ zz}sv)jef2#m|wBM^mM^rKOP?{&MpB3K%S= zO9_^@cY~88HK8=u)C@Jh(07TxJ~qTS-hWPA3wT~jTsogkwNZ`w5mHaJSI8QdzSP2E 
zZjms7gH zN-JU$Ba(e~JuIM|jAOWvUw1HEr~wLZh7HLrxtlgAAchR&vJXx<|gpgx+zbF@D0@a)uqUTzB0H zN5w!`X{q~N{nR0;nJSJ|YHNG5;j76LSLT%`3w6o14HyF6%J~4e8Y8?L(K>;fc~q2}0_-ZRo37@|1POQ3hU9LvbjY>8;T|+j{p9cy%cXUEZUL0mEK`qvA$=4ppQ|F@2 z!vg9mg5Be^aQtto`|-Wh(k6<5ZMOJo9o~#vOQ|DV`++X9dtr!ABBc_!X*BrV^RM7= z9j^whzutYx4jn#Qe9VS(4}|q_Q{Gh3xzz*)@wKu|^_ChSTI1mL_hm`{ZD=J@W+&xu zlnidez|HT8heJlrG-)}*LD!qipQLz5IdI;pt#Ui~o@tZkVZG)3&WT4E0tg6c(?FHeZc@6+-rH+e83dv;7lTl24+?>b~(UWodYQ$kQaX?s4< zW)M<^K20@ZiZCo3xg80$C^_VHu^V;AZz5$q|N5u+*B^lLXqU9DPONwfy9jS0Y+Ys(7ONVf%MiNj;xx)ae{ zq;h4IndT`|p6>j2%Zb7w3sfr_77;n?OWP`J1w+7Up<<*@`XiDXQ>q9lx+SJArFWk~ zslvEFv6fe1U#}V#c|~x6xv;O>!=pK*x=x18j@42eOXA(*Sx;8c+?!GRdB^e-z1{5y z(moJbTBpb+1kwx(3v<+Uq$u;mk^0Vw16bxljR;+X9%3v>{c!|?WW2#ve-9$d0a8Un zEPIV)?s*SNiZ-36Q=BB_f7wRWjrzlo;iYrV`+a=_YRcmirq8W+#kxZT=>jQ>kX5YW ze-Qy(J`C&ub+6;~V*`rL%a1>$($v7f;1w3BJD$8I#|a1*w-iXFTBaP><)6D+YEUl_ znQ25Aun^j1)3Jt4jJKPG?7=jnyL#uC^xlQVMUp5`D`kQ~PS>o(mkrpZ}jGNZEhFgB{EHSGCq66b3vm3qUI`QMbzYbZ5?#NDJ zoIek;$!vbBTP_UXu63~UvZlG#oNr~w(@&+&N_*YE&cX-rWDK6gNB*^28xlJhfce9h z`1aWcv6F}5+Qk%6V^grzDu=~n$KeQ`1?OaSK5*xJ%TS=zi_pVY1jD#%6=@bZS*FN; z5`dOZIi(UW+J1YdeA6hH9;Qic{79ve6&LLN#q2yNc~KdlBQUvOBv~;;BY{l!8{UU( z`#9NCKN~s5H>>kRp zePzlqL@GFSGV77rWi9I8avxHl@!ZxR)2-kL&*E;gTUECrL|ELdI-6?5Ay-=|1htpt zTdqd|iW0@cZ~TsoUjn&GsDVVtT@*K5TwdbqZj`kQt~EB{?yyN{<6LY_|4Ap4Hs??< zszRhJedtDu*5~V;qnZvgsRW%Us`1~vac~5#`6H2=H&K-3ojQch4gq~{bxr-M3{bQx z?S6g<6rLv@;(q;@pv?X?geGD(=rR5=2cCjnt0rH~c8g7DkSXT?Q|mJs;i=2qYk;e$ zH7B(730pQwdqbgOB_BaLg0Bz|Spwe|T8I7v3o*C7|5u(CKgVc08xu@wN zOD#Tli?>I&pPUI|e5QMu&IfK z*pt42$@RYbrDr11GVJnLSoCF|X4u-sZ(r`aCWp60e#Og;V#W@yfx2ytpuhwQ?n2ew z*P1+Yh+!80mlc}>x0M;~sdB+O z4`3%Jk?6biOd{`ypJa<9BZ-lwU2IukOJuo;91y?nrPt~%$SjD=bnEOv6yLo~mbNVO z#=9v*Hlxrg3^A;pJlA5V)CB3Sqyrk*XaoXs`eu@E6P%Yfe!)Tng3o=)1gxcwFJ$6Ag>u>WRq9SmRdWs=k>7gZmPHqpc8ER5X1 z(kaIAHv&s-Mm|ynXW=ri{Dt>oF;vA|lH?qiAD}RGTj%Vny5nVi`FCDud1+pe7khtz zjJT4s3h(J+Xp;%s2rX-M`fi~^dBLW|mR2=;6NiT&EutTFpBbRMb0z{D*QQ)RvpB5d zP8mq&_$NKLb2C3`NX{AQlNzu)xM+vGs>0yswb|q$ho=I9f|G?QeA- 
z%hT4zQFrv&D#`_vZanhkt8#%*9@<>39qst{5-8dm|;d=mW>1BRy4Y%g`st^X?Zi@z@v!W4n%)Y2Mjf z9nE4ioN-hS>gr{Pe0`Y|@x|)g)Uq&wwg67cgR)kQROtlE(~p2pEQ_E5XjX!M{Rg@(_jnaflpo>)qEF6B#(^6`aWmIh4J$7 z;5!@QN39gT0yP|XDqfUCPkT2yO?uA_dKf?RtdZ&d=8VAQU0e6TZ1;FZtO_ogkPjqP z_*GDdey=DM=`R;2&t@;`z0Z|ls23R>;9YW(MMIM+mt=Skq8b z^Pt&A@s5CxsQd#|7FcodBl1Bkb8zA(#+13VZ6&$NJF4A0CqI(`xj3n5pwXw1Di8)U z2PK1F%K{r@k<&@}9d1!GF2d4|d4zaOMIA45P*^e_E%FP-FVXcP%qoeAo+?mvMek7= zjYU7Wk%9OcRb;r{n)qTLYb*!Pwcj^aCVspuUaUkd znbo`->F#Ug+TGY%@qyV=CU8tL00S)9x0dIzvwxD~%RWXM?0$04I(=Ag^x17Fma@SKTu*ooxBAooB~hS#XS2>8$sx{SHZ_0gsBSv#wW; z`^M2*dn*tYg6KQP-FC0!H5M=?qKL&O)y+fH-JMQD)ZFk@Z1?Nm8yH+!K)MMOL04ns zEt2Jj>%(}Y2OnN!BcE{)YeG?hT#(ClO{*i}+))C>-y*WVtCtAbic|}B9lLb8E;d@I zS}G0Yk&|}60|Uk|ZI)jn{K1?ElI_FDTi8cWZJRPiNGUB2e|ao`ABzjJugUiz#EcgV z9x>s=b*nT0mP=v6fB+G#IVVudL}9T;tbjoP0Rl0*!$cxk+8*Mt*2#AGPd2l>kPa3u zsn1zL+PxjOf7yUL3tTh}qUFm%R4Az%BP>CtF&wpC3Gv;YN=J)aa}D_C(Nqm>YAW1F z3GG7cwL!q@pVF8Yc0*xs=+o>YS9LU&a@!qs7wa7FyOAYT#3rMeoQEQbn^)21>yJIR zTXRNZUKt=4t?83B@e&nrz`9arK`m(rFsQM=c*k1f^-UGEp*o6s{^+*S@lg$N=nrmM zH@2dxg9^XYBNIH#^edJBNl(2i_JeS6{38mw$Om!)!sn+pa5}5U+fqqwHj4aipTkdU zPcDTJvoYqn{u0%tI&K5qT=R5#6M`Sav7W9n(9SCYk|qtt9_diT&jeou<76br`@dP& zklV~l@44J#(`_S%w};TzpZMdGE14vzb<6$5M#mpx@!gg09(`n0!aa~1vUd`%RnjoR z(zb{VP`UTt-bI)CZqV(dY=p6&gQhZr@iy_bl*<`nq!Czp_QYJ8BY> z)sAT*gfK1jm|KD+&7yJb`vpy}d3K`x&%u?Nt$JOeAB)Mr!r1gTI3u`DysQBulB4lC zZ9Gh@1bJ2wtn7R;xg)Qm-_$eDR&OP&-IPSiG12D4lL;D)2eIdPPE*Ic9J-$le!|kl z_9e@OAB+A;AQX+e(%LyQRNn>}3frmYRcrmUjiX2uJKD~{{ViVt0i%>w9+zgWmLtp( zd63*mh?PfU)&Wev65Tx{O2R~$yl)l@M$gnJZZMzChplq&chtRE-*^$der|a6()KKd zrF9~PdW=B)$f=8v(sl9zwfC&rI40JXH%a$R@Z^*5`2ARdk;v}8saVq4nv;{5&@ge#W z;D0pGf9AN*_lDoOi%ktux#(NCjqOblvn>4&n`M|Wu z&XY3&@Gyw6at~ZaP_)h7$YAKY&=ucU+xee|px{_cG(&y^H^Oou#Klk6-Y;X{I1VKR zHns~|aI}N{F@s;Y0~YC5HsAD4f*3w14ga&E05q<11w;AMPz*ftd(ceLY+Lm*{Ch+x z*@t}oA_MXL}VyYi&@<=UaFbcJl66 zN>3XgW&W%0z))a%R~JqmjjRjXe$ouvTu!hVE_BXEU^B+nD#cYOhTsM+?cqoOoCgTo|rwRtH z6WA$D^CK3o-}M+YZ#sntEv7PXk?_XG5rE_ykk^VR(yIGaM;9H!3>lKJ;`}$rBf3>Q 
z$lumQ_)_9K{c(2nukzJVvK;JU3*1>n45&D98j9XNvf&=9DxjW`gBBatzArEv{`cSq zP<5&0TU~e1{a>CupYVbSg26z}fUwf)#xP%H7!5RXw*WE!oS-%;uu@GONKO}CepE@w zcZb1YDKvyxMuhDAYen6ht?R0~JFGXc_1C6}?}VZT!4Sk96~1WKQC-*2%yIruNfjSp zEk3ClfLPs0M<{Ue!hd~WkPB(Ws=0EQOA7)g!B+Tip1Ssdo37@${~COYDzbg&7wh7g zVkTSwagp#T|3iUUKEfAr3<72Xg;ckzBJL4PV#(_YRO`7%KN`PJ(sW?vl_)qmb2VZ5 zHv5NEs{?z$uryRGp4`l`Rq5z#HgrcWa5+YerB%un>EQ~`f_V%hdxyf!%nFA0RKqHJ z1E!e8ijbrvTo%j~=CK>Yvvl_YcP{2n|U&j)Y^5IF)~p59F4h{XY>8_7L= z)BIu2buYO+6G4w7A}7v~>^NSa({2XXP;BsIDIw#ok8?I!WCXs)%qOQSA&2$WzrxE0 zz^UWh4`x>0Cyd33NL;mO^qXiA!(NW~>=;;j8R4yMlcc-Cm~CwogfMG2c!)&dQ3JQ` z4?i&b2f^qS&)3y=oFvJFa@x7uQL&5yZB*H5*D+TADy+9`Bq>A%nk9jH^?|Zq0$KGW z(cHqAaJhN^D#NoPvcL*lsX}`9ya;8rwMti%sPUzy>I^AMUf#0TJC8gh#;62eCe9RW zRs@$`Ibp>Zyg>%&@;|rxM6@WnT)A=J3RLaXFMfLP&~MV8Szm8|$oMTy{W&*{5ucZP zMmszVy`Vfo%}`~_3tkR}PH7~XV`FJs;C|kLiKkKDYUxqbgU&~+LGLsikA|137>#{u zDa^#1;A8yN>u;YV`U4{E3%d!_BxLtPg@K*!un2wq#M+gmHNU9d98uL+8K+dB9M8cR zEaz)BsWdP*jZk!`a8LNcEg(VCD!Y-w|AlZp@h*Kc@fq4Yy7cj-QV>; zBGq%bTJ`>(6_)^d~}t1SDO z(HGpvx)Jd5_VMXyx>%VMlR`vScpit8AM;{&e3o$Ybu`ds8Tfxs&i_Y-`^%B->gNkS zvOi0R&I@DZ^+GEKqro$4GsPcKl!bdWP~8lHpMxXXR#IV!7gb*4`ysM7g*sFdQjGd-PCO;bSa35 zzAcA+5AOMnVt$vW%B$wGCr)|%8;+$y41FHa=ar_O$GGQ-d zR#^trqZ1JW?1p6(u#LrI%vBm8Q5{Zm)My7&I5KWn`OYwH=O4z5 zbVya&kE-FL<0$ATb@PH>@~-);KeCP<8T(xk#!`wsN1O+W?`-yj+&*4R{vBnhHhF%$ zj-zfre-Hh$ZzN z_D+4B4OfMux}FGeS9U@#4BN7F!`p0qY#{nHX+Y0S+Mkj*W$)pOGTO?eFT`Z2lX2!L z%%6$(CLaRb>=z88O#AP>WJD=AJfJj^ECuV9zHMU*rg|NarBnjI+&ij2WkMS>Y2>{z zfjk-Z{0eg6hrRn*{@zT)f+7o2WZal{^>v=8*@;Y4>{^h0y=DJ)kwIL=WI#}@hZ>%R zKpw#%#e-UkQZaC0W2cI}*R13|7ZP{@dOh;f=KGY~vUN&eQgcydDqO;@iJ)cYR3~+U zmkt5f2Bq9Q{>jdW#=O|=*2i1$Wy>rZJhe?`&SRuIXc7>c>sh0kqW&X@)mI9;e zeDglKB#_x=v|cp+PB5IH-k-9@_HcMKn=sifN-X_C);U*A^8x7;%(xAM!0`ttp@&tY z>)zue2SrtpLPXn@z5PpK#0&yFO8|y$URV}d0FQ$3`C8RgNskQsp=nye${v?nD%Z}4A2_@x)?No1pnC`mY%v_OoYJF0ZbOa%!OuW zSEF~0A2$O7;mDC3fEh57_RCX_-XZv$GgHRm9(0DP7L!=QS?Sz5$1pKq@koeAM}6Ns z2VEVGe5sVFof~ewO`D#4%kWgEu(c{wfhOJ^58gr?3+m5Dn9g4t3U;&~w{Ce}#~02f 
z1!;XgI(bruMat661wu(6>7g=B8&e6{&rG~PRysgfaYk1Cmty_!H3}3#J2ME~EtK;s zVSQCVit1L;DEc*o#wNO(xa_$Xm)KsRL)&K>Mq?ny!_muDjX>5M-ON&U ztGVb^sZ<7FO;UY<;$ii!!PVi=z+#g&L&Vjk0m4x<-FGySwvWSsgnQm!@8#Kpd4~c% zrM*fO4i$&?UV^}9atc0twpNygHv(P#1Y-}|BRBM~IOCNOUmRw_iHrmbEA{wIC!mse3?3*-clYLkVK3YT4asTQflz%8 z`bb(EYMwbKh@g(VxRCOoE`XY_km15ziHX^66Z}2XStD6~q0PUXOc7tLay?m45 z>&gAbgzgLMMF~~JqMck~s27EWcZbGA@5vIbywgF-_b$5z23IdqJylcI`~BG?({&U^ zY1fhSn#@ElY+eJOnO1A=7_s$5PnzMN5K%AgXhPsZ@GNTV-nd(*-CH zR^7n0HqS#Uz3G2_6h@PO&I*JCEK>gcxGX@1HeWcUY6;jbgz!@9k;8_7@vGwQs3p@# zp4gLVC3=h3H}h$i07r1~SjG7KTKY@b~cJnBD&*08Rx`u+Z;S63i;h)ffx zrSZ9>)bi!opBl=vLr5|P((g8P8-M6PhmIa1GO?m8Wo$?jUcB`!L1m|Y+P!4|Am)j8 zd!@r-_Fe$^4hgh4CQwN-Wruz9p5H#0_zz<6%tNzGM|jjeE?_X(AtM1eXvA>v=`2-W z%xXuK#*e-Aayp5o6t8r)?2dTLod%bimDO|AO+H|f3B)#@zR#fc0dcr4YrDX8Pr8(! z3ZYz;1$tj^pZip?oLcL^sPaq{&3hbsUxfdrpm>@inD{x(9J(Ca^OG<;v9`jZ;tv<@ zG}q9Bp!3Hgg}3+b{9K$&w7xG$Zx%<+?15PCsUfP4Z5$p0rCx7-hzzVBh8{a4R^=(@Wx)frbY)_ATa+GLKxHrj1h{H3`I) zLH6oidpVL`bFL=o>%9f)j?xUJU`d@ZUIi|JwnO!l&Y$E@%0mI^a@;|u<3BvGL}(Ol znvj3#zismmAhngM!aC_jU#amGhJp)@1$_G-Zl=7lrkm<++ykY)?UBsinOivAP`*bK z_NeOT=*sGQzVN}VQJ;TcQaOCP0&IFINUE1rgsLMrES`CZXD-zAGwPa(6C!JyQZcz| zq3C&G=8FmXjRE1b*4CUdR3N8OfXU#eqkQYFWp9@1 zANtHTH+YtYDDl|Q8pkb~wzt2aLe=l{Ry`&t+5hR@7N3Kr7C6cdN!&RHN1sgHIh=kfy}V8fHiKrZ+al+=20;J!c{Sx2}LF1+K@w z?&GZXkIujZb;8P3I3I4N%uUH-hUf;)i#P3tA0Sid^JZ3(!C@GpK>0*eBe})U&MiYH z9QrNZ|3cOWkuBBK5{|(HE!@}2a(Dr!w*0v&B<|wWtmt zihZKvCgr}((3y26i){HxLpSgv5hx{go}htPH~%YY5T^4=E>zM0Ow<;&Ij`6~tw^&v ziP!VdHd6YwJc*Luz0rJD$JzWaZG7>QT{JTD8}r%Shys7&E$8y2HcNythA-e6g-FiZ zOkYohU=*J9)w%aAFs?sDVlbRu)L7p3L6E|Akf_JuV=9Tz$0( zR{v4YhWmxFY8BNoDSZmvFuEB7awdkNcs9VQ4d_d}df3gQ0svm%WBhsa2%b53 zI0yp!ew0KG!F}@Vz3o;nbXg2aZ+6K>$!&oONO(e~3*_t*xkfL3*P0L61weu!AP9m# zs$H>4R{4%DLa_JsQJLQov2V0VSl|FYP=A~D2Lie(^Ql~kdnvt5cEcZ=1Fwveyu{RA zeFdYUs{MmV{hnn&43rJrR38ong9Y zhbZcKm8O-wL_OpbsAM8MOZ&#Vk5f0HI`c!OqAQmfL1|#l3ejLAa;W`xQHitp%FiwK zuNY7z{NIo_K62cV9ld_XZZd%Cc%(>hJrO+Z&JMk!>2DpD82^priTByWBA4LI%kdM11xM3`6N_PfN 
zLkIFMxI>0#Ghhc}TQ>1Wi3I{6bRo}q^*PH!>iM*ko* zq^lLfBq}w?T7aT$!|+LOY)0JogZLJfm#1^tN-zldfUF9^qs2#7iF=G?$7J5yN?rA} zd3LK*<{oJ}6IVCtcomUCswhhq@EW4^lsC4xb&y3g?+*Xiqma%ExZWTai_;r#Ye&NC z_3nj(jN}e-5U1vjxPYb zy3j!?406ebl1C$6emqfOIsLe2qB?F4Z9;zvUp(tsfH!)GF%l&VJ+;0wmlPw7!u~sp z#;}H@Kb7`d5GU?4q+sJD5PTt&+r4ky2$8AAI=QdN9HZI!WGxC=_-BLc8Se1GY(@Ie zW>q9e^$(3?OR>=^ z>TGdnAhh@CpZ>~m$E??sp*&I?+8txZ&2I(lerzm{DRnzpZKd9I0v7?h~1 zx=$mi&#PSIhU!}nM+yww1(ZQ(Kkwom=}cM^`j=fqGF40sv7rmn!`oFkDnfFm-D|>_ z!s|sVi4K9$;ELM-Bb;hjTwn19=DTwEvG^0D+G-c1RcAGvp5tZEY>7eO0Ms`x`oCa4 zVxPT?CLshRJI-vR^7(#$TvLI-#}+HWDmCN+W$6zxjIPjH!$nbTr8o1^Xr35|etZ)} zjbUw4zdyX`H6G#0Q7v}H9Jk-UZC6B`)4b!^bbYeBwNDj03FwqWNDi+RaQB-U5x@d&mBJ~u@~z8iF2@Y+l#(~ zoIEjF{2G%2>-j-ii&jS*>{oI*$!6zOeih^PIqJwB)FyYn%5X=CjwAwYir5Np zQdHY-LuxXB5`%cb$ZHS+UtaKVLsYNG0XQL;$VW&b4uT{AZ=YW z&5M(^Q!_w`djAY8^|2j5d~?erCLAvB@sBK(#gdlD0C(H$$&v~PO&jA&d#O&*lDpfq z4gQW#ApDa<%M}GWz9bS1D*ejLP9vY*36{Zg#H-;%Cpmmg^NzzDvm%qQhFhmQN`0*@ zt>vDeU=W@6&@j;}4 zT+Y+~i{nOTINe3Kyut6;wyc9vkCLg=vd#~Be$PPw5;)<*@jnD^9z^uvbX=d?n@ccR~bQBIEV97!gPK!k}-T9nc>?u;MuJR)s&94~c8b443cRMl8x9 z_FKOQM9Vz9%7e98UXqpW_1aLpAvnj!tC4@}w$~9A;}gcL!XnP3kfy5 z>{*Cwnoh4HvqM_mtg}C(X6h}7v6@X_BFW$r7R;|t*q29kG+PobuLjX&QU?vEV9BSj z7H;^MIiY6Zt2JnMpSO&H9xX)kIZ)g(Ma-H8TQySY{gi$z)>b8l>`UScJ2~S7tYP?{ z;hb!=e#vR_{laHhZqm~JJ+NKQp8M9}+mC_+OYE{lGFMescvtWh@tPYAu3^dIU)FJf zJ975Pg`-tbqiX{I4x-5yLmuxam7x|8^dq4eG58-Bj30I?s{|hJVa%8niXZ_8C#Un- z=eukV?k_if=SLtF^KVz-J74qMICv;c%BA$~3&U!BR8*7X>;)Vsmgb``4f*-IVdrAW zgi45$o^M@p2+?;5rt~<=p{bZN1ak!O0mNfz3B5G->{o>m;?NCT+TQuItv)_~|ORZ>okuH+H|Uj)c~h-;k?|FBm^rz_gq{nwY#!VenF zqQh|UI1=~0Hq%t{twmpsV+x5XxS&3bPZH{{(cbeB{(C{8S2#2LOD8Ehr|&UHlvxKU zK6PO3D6~y>Kv0zuq6cdwRbKgYVzOdks>l?L1|9i0)_oYuz0bc(V*`gVnRbeLDwtr| zDYdTG`6a0<5VhX_FP}+k4j+E7;y2La6VS|-1VXlpKxiiMeWe_ zM@5<#y${{wp9u)ld6oNN4%Eu)W7$bq|4wSR7MXb>FFKL777imwq0K}5Vi#iKa_OV0 z^o0+8OfOASN70y4r{|n-+5~z*y~kAI>gJ@(bOE_&I9vZ@2rzA`w}%u#qhA#FKH7}? 
zq;7nVnK-GI3eNCsnIk=<_i_druJK$DZjQtq0g;?({BRK0r&999GGaoi?_;Ou*yFn^ ztA=itt&`Ts5Se}~JJE0dEb@p<=j^~~C1|w$7Q6_HW#@KftAv(HP@Ckvpg&D!jOl(i zXlDPjP+c8oD`Nrs0mLr7R9)!v*tn0KP6~%{RS^2=_S=Tse6!#ayrdhw4c}KA2a$j? zVCq{4Gu?9GbXvA3nY6aFnMHYU<`N&8vIB!ZCIg-~T?W&j+lrkc>AU_by!? zDH;$MVjX%WoFV2<`zpGNRjA1Pida4@X)Qh@zv!;;6EczKylLa1>AgtG7&erKijbi# z5P#={W18s4zr4`^SGWgKj{jwc?Lynps{&^IMxe2;p_A8wnuMtGL92rsB`>fK8XrwB zff0B2Rz!7|Cv$0yho;ZBX-*=c3O!M#rN|4n;8HE#(zSQe^d1^HQ7)hF8h%X|niT6rXka`LBCYu}K$ii!mqVi8fazuLJ+c(NuiTb7$8i_!#n~&r1Tm^E!ZYgPZuGS6`0eoRxtO1&WEouDyl4et7<~}U zdJXx|^Qf%CfJUwT_r|7Hg7zx8Y4cd{^qB}t&R=kh<-1%j4{_Nw5dTBs+KfDFrgakq z=@)g%M(!B9Bz@rV=Cb#@Q9nFn9?q-Z#278SJ26;nW}a|N;*1+# zy;k$2P+~~<6d3$V2Ij7hR~o&Do(x)CRM{}tzU-q*DwYFoNn7Idx&{V7(=VAT>4@G3 znM~(XeE6vLARd72uLX!+C{1ZNK`A9r*v)iP+!q-vbP1wHsX1n@*ZUdHon@~XILmv1 ze?bHCh(Q!r4PHt}hiP`SX^j~#Ga}uAb>RXP-Fo3?z$*lJ99u{rpyZs5)X;8fHcm%( za(CkQkSk=dXznM??bhc=_s@!12}_^2Frs%vL4^19Q$gHutlIA4PP2=6i_I4GFp6i0 z=^AUfY|HOhnw}^ZcD^A30o&Q}m`Oxd1bf{;XykbNe9al@XO36$*15l8JD%pxQYRDk zrrS&3X+oJS3Fo6f_GWzsr94am+yG4kW$GK8#nxF6t7sIF7bn8&Tg$FIK?wYqC_L+> z&a{4VMGD6l6MP>p;Yy+&wc5mN(~BEC`$e?D(hP%EO#}Nj)R@CEDBTeASOK5>pk6k2 zPIx1BstzZbQ06T98P({Vg2MNLkHP_MPOl^6=D4pf$G;&^fv9P;7Fs%O|Lo#+t9&5^ zAjM{oig-bP;7|0n_Dj~NqF7Yn+{B28LQe9kAE5%ecq#Q}GZgiFu4*dhAoWrLgoeO8 zjBK&*zcWh60?P5Xki#hGBqVXzA{dPU%jiLl_XH8|joz>5y&^ zkdOwYYmgoq>29PHkdF8G`Tm~u{spYXIp>}`_P+L7Lm)Yxyz~!oIkA!_^-nEj)hTqz zL97xqqH{HrZe!LqTWF⪙XzJEL}Gl)PtsWAg8yY3H-(!@QR__KeA0Zi;bTQc_sE- z-+5HC@a&#=*^P>`{a_H#`%hJphjzM{QsABxb)b991s zjyK;`m57Q?Mc*>os4FgH&SAjB{~1J~ySg{Od&?9*cTmVc-!B?AIaXyF91V$EJrdn| zigW&my%#`ccvV~xTM%Ztgf*GIqm!u&oN!*NX7VWriHilXldVy`GnXUCNVHY#Z~dGV zmrK_gSz*~!7?T@5t5rX*svf%OL^6=*^zlbzsKkS|&55M|TQ!|STtqnFw03%aJ$~5i z#R%y-#hGzA7ODh@!Q#d!$>d)Ya=%A$E*V#BjTWD_2Zaw9t#1nfkU~}e2)9vo=@KK7 zaI5zOJzWOm9F;j}O7)CWG0Y@d4P_DNde`Z5HtGZ^GN$E7X*hogp&qYAhvW5en%c*l z%+>P+X(@oz4jg@0l~Z`#;%6LvWbU`)a3MT9U}!S2tmHw)^w*#7 z5>ZFYyIS%CJAUCjGfuHlUc$?R#ysua=|(_{o5dv1489046-Fv$0(k$DrB4s@6A+#^hnJD 
zKJ3W~KitAT7Aa8MVMO^zGa`E0f5cI(1dg7aoy&ooY?hkF-Tj-(l$ zl%`n@#8Z*acz+(SrOCu-XMmm%u9vIP@fC^gs~EHcFv(JXVWUh?%lW{7Cw;ex!n8}I zX?m$z_7{pDcs=BkdpRO*JdVr4$Kz)xqee!N;|ncrrD*&|c#7Fmdf+BL%V8T23VeI_ zq^06S0iw-f>!10z*yJ81WNtN@K9G0 zO4X~hG-)FHT$h?Q=2gv)K>^xMPi?HIGXngLkgHd3dC#E9Tb6i8Ni|L1Sm$inV_cU6 zyxUZG9UL{Wx-8TPhTDS&Of!T3P}XFIr$-&ErYYnxo8Ev;cp2;UzaMe1oSL_h?>-GV zxYdi0B%R`ZEPp6m`4FWJMX`$x(ndtTvP2qrNEs3|kvW)>-s*Ph+b)c;`b!s{JY>Ardq+Xh%Bk^I;{M)QB(OCuzn1J+KfLxL_*I?@Wrv+xTrWB zJjaThRr_A;UcGz^Si8ftx<3tjNio<%8M3t+;IY42Zlg^&-H1nfGMskewDojr^GnuR z#$u=!fO6w&KL=t^1VEB5Z?F7WYR4XrfyBEmjoTNYDgsUpOc66Uzv5dGrNe`<&W3$< ztuyr{sMH0^BJa$V=&twil};gQ`WiIw6QOtTqkt04Hhsff-w4w$MdScx*`ci#+ig}I z7_k+!V6$XdiwYG9e*(ttK+G7rpw8Uz0=GNohs$Y(1cfweCDO|*h^)<`s-`SiI$Y#G z*HRs%EYYY3`lNjzoy=vY)SNjc%Kyk*Ddsql?Bnoa14FmWvJeye@ckfG!;Qy~esYsd zJf`UXy`vT3HUo-;3I(!N$W%w=NhUov$YZJv6}1j?W)!?fXS<`>9?CB&2Fqj$o#N?U zJd+`n<0A;;@s)c0uKJ?5Cz`Ivv|RG5XvJuCM1|Ye&%-%vWoLRsx^H1bpKRx%uG-6+xa@^!@<@frJkq})ZOfdci+--|)@^E@R&W7x;xOGNUtrOB+h z3LQp9Z;Ljat+N;Mt8{1#&oHgN|`ay=B7Vf_2(&6*Wv2uf@Op^-&UfiGUpN zSic`@_X|q%#6fNkMeJ<+f0L@{Zd=?~im0SxiE7B*LB5Q0=U$MgM^K?$Fu)HgBTH|& zx8*rKON$Nlzau)k z-X6%UX^mjS6AJk;5Cgyw$Aykr~lW=$ph!U{de^F&N#mb#9@Qz?JSGDv~ zEqJD<8#J%zfXCcqG5Er%8!E_g(mkB~0)^j#xks(^g=ud&Hq6VFYBmoR^NnTBY$el! zyzJg(4;NdPdblg{nGA+o0V8f<)u!6u((YjBSiG-x zt8JRQ5?Iegw2fmOeiS+$+GaEG-R&l zuapXPcl3e2g`KQTtHOlq>szAa-z`uV;Fbv%obe9-^%2ZQHqFrNCn8QMP6}&;%y+K1 zk_t*Q;b4d`yZvE6E-cSg5r$S{}|QeJx$$d4WW z#7CnX$W0695yW0x9$X=%bIbWHDpJw^PPNvYI*Qv3y~v)|3!{fjO_vN3)?!Mk7Vo3b zTrP`^Iogx;LO+AfB)o8e%$-r*d~Yg2t`3L>l^{Gy-r^DDg_o$CXW z1zYV^^0tiFJvORBI~H^+g&XLj$2g*obQ%~IGAxp^zmDGJ3bKK8tJm&($$OuI)e3=? 
zI^!(MVXA)`Wz69`QRT!%49@w5Oz9gGOW!S+6XkIxf!ne{S8I& zPz|lG@GDKNjep}i&n*!HgeM8B$b#38H{8`u3#hN9_tJKx;>Z1%me7Uf6I4|H6(kh)s#im3?|G;vwPV9`13mQNBNSAb#W z#ouM*W`;}};otj4N&h$+L2Wf?%Y289mUu{BIoOosF~iTm<8$uoG_Mtig`TAmmhu6# z9}(?bTNa7hOo|-|;_He5c`D~Jz5-w07=%Qs`pvg5xT+VYh5vG+G1x|~Y_2~r-mv`* zu^aRv7#J`HX+93;VR((C!XMB)@RwUtEj=4tcXSAT9=Z(Kz&->{Qp8C)#@@QIeLnt8 zPr4>f(HY25t(7 z2CAFFjnQxwc&zup&B4;L-o8^rVkrjWk_cmTVNAr%b@{RP%}=Yc$#GmVV^zY-<>zzH zhD1{gf6;k(iKKmi@^&zf)M30`_Sf>_Sm@j~c6(p0k1YWI5I~CXdx_;?WD=S0`DIR$ zZSV5I8UQhy5mt&T$R~3SA2&vSb`uw*75HNYuF;UZkq;#$5Qvcub7eyw1OPb~5&)NH zo@bD7bN~xx3bA<1&%XWv*)Y%rZl)ssw*v11A3U(AM0Jd4l5Vj%m&h2NdyvP0~%@wMdfs@$n zPu;;@yaarFxDO2~x`FuARfFg1hZ(I5BlWPvPk(+Z-dtzAkq{xC8;|C0ef{Hm`Gd<* z(ZaRMxt)&}NS94mn+su{S{TG>YNX9?0T#6o^h~~WnGR7% zfVE^ulVgvZqC;}H;+}Gpxs`Dqpy0iHyZG_FlwW zk@Tm4MB3~~RMpS(EzRR+^V46RU0zft$URayU;do@NAEhJ#Ds@Z6u!@ri^N<`q?7)$ z`GIA^gZEOetT;%1UAF266Edgj@228{5GnpoBOv8&S?n175ZoGy-3LV*K9j!t)f?f@ zh_#{f4Bv>Lk~AdZK>W>BPAicGY7&`{wH}@?0v=me5}$2{GN++#s+^b&`EE+7A<%F2 zPlXCE2{I6x34Oa$8JSnjm2cFkdj3MI&Tgl`Q!FhLa}f!{4A6?}KcoIXI&e zI5rIxIP*V>(^%ctXg%CTLQp z>RHJToRZ!#g^7*LKqt`UZUMR!S4^FOc!z;Qiw&O~Kgq-zDq^BI*whLGQ~?mQ^dRfb zSGgw{SE9t<6V3PB#`aboBstR7G<#*c&nhAO2!)CAP5^ui#f*lHeg8Q1#-K{h*6 z8spZYM!#V0zR36d+(+*W6UIJmP^g6&o?c-PATj2nN4=2q`v=fMQxH); z`Ck3f?8n6QhA#|_*bVz?@&^yGmg-Aw&72cZNG1U*5foX?fR4BOjHl~%%Pk=YY4VAsBO{u$m?-X!6- z^z9R;yE|uk3tm&{3Sg9`PL2rMBK2upGB;5(nDEP`O>r`8%** z4)hsCmr+R%n*+G`AL0@b;4mx{f8Flkt8q32XZb`$oh)+Z8*Q!M0_H%2>;n~vLhxcO zsq=H&CAwn5hUjUT_yyT;3S`e~7T;S`&b{`oYJ~4P5KFJO6WRIsnq9uOt~HAcC_LXo zIbK2$afQ97E1*w7n?xQPJmguXRYS=c9!@7@go(S`1q# z{BX0OCVQ7*8X*xu@3}EEg&ys$O)9(^A3Ct?hdUQ3^(4RkSOc!q#d&J6lH8TZYjisc zyv5l)W~Q|*!_Jn(?jALlxn|)|2@-xK`9IJLGcWU z$a#A#+dPjaN>tJl_aQTU_+|2pK|#2c@|?L+c#&7POOvmOm6`2SS(CjXCN5daWJAoM z92FNgCjBQnqnPH?ZCfq^&P-fR4>(?zCqJYH66mktKH|kZO4v~^jY#qN8COTv?ynhY z%}~KV4S_Mr`@u@w8NOmy61}{8UNJ_O$%ls_4aj6hX&$r5wUNmEvWzCB-K7Q z88J-&$d7MolK#){B$M|=JNB`UWRmWqY@17W;kr6<-rT`g_`V_5)?Fk1NZ|doOQliE 
zGX|yj8D>*N9Guk#%!XJT;_1gPX$GRc4^3!(Ufh+_I^-QB==&~C6qM$pam>_CFvfnL zF;XV8URxEzHjLwlcj>J#H81^j?K6+Gyl^Jr{MYV6KEKr`d6G4#M5T(#Q0XysFodYr zNV#S>q*bVhD5@+Q@)T>N$f*cLwg6*L%h`Pxv2qa@k2-4D_CiR>3>amx)N)pQOGY&c zYwc6u>|MYd?vuZtu3eBWtDflwfw_uyzj{r! zBmoE^YKCVcZ!1Uvj3*YaP-bQCU>1>b8sdRJ?5&_Fg4Xi`Lk|cwD1d{%XYX+5 z9?|fk@Q=F!@k1uzlI)fxk{Jcpzms^nht+`r1hvwe4`r*Zn7f254b(TVBRj;~lIWuh zWlY^WWE`lza;r|;BTT#`)iUU8_zGMEF^VL&SpN^p)n6JKa*s~q7xXV=`)IySlp6y% zChA4^p?kIUCZKXHjmuz&2-*SiV?;Hexkp2>Dbg&ypw!5WK(c6d6Q=sYn4+wc)%I%S zk@y{C@>hy|bov{Wf4Fm_XFXS~LsRY~v-l#+0qE>e3%Wp`dsbC^*h7G3ixjv%nH6vf#9!gJt z0rUh{Sf<#z#iKcbrYx2LE9nb-cYN`G+bnPv8Kif-KVVLTO$)d zrV(9KETMqwOsVc)xWz}0QO{mpz)Q*8Crdf@SerxCRX*b7%O*!vvI8gb9g-EI=T zs+xv%MZg3xkbizH+|Ur(Ay#XafyqWPdtOdBE6ENId>O6|La;%VN9KC@6iin?vQ9S& zH?;YR^PZ+(4Um|!M5VC!(u8M$n3Z(vWTU18LdS;l5W=zy3gwK*>endVF%p30bXMf#z}?qC2iV3K3i+J6Jou zXnY@op2`2KU7(s=3q))O+sAa#1YcO*aGoPVRwz5!Px@6X`X`eu21vi~0VLlnNAz=sHFJyY_Of;Wi34RMlGAQ{Z9y0=}IuFQc6QX zve1_NSfsZF4fdF0o7h4)C26PWJ@zixgYy@VLvFE>cBFW0LT^ z#}^L9)In`ZA$a$DfdgKB|Ib7Wz{_GG{eJ8O3f?BVJR!u;H35{Z5ez+2!I;9hOvZn> zWrdqEf$Jos4>2wFw0*c)IosaIn{!yh-#kwUMpb_VfUG ztb<|HG_som=PuDE$TNCIXY{nT*%L|&xp^CyVIxxK-HB8(`gS>!9Y@W(zLn9nt+NT_ zf2Ulhw)sD%#$vnJ->-WEJSz|F26$UMTVB3jem_Y_`T{X*81N`A&A-I5GB#7Kni;ab z_{~|!zF(7E%TA9Kl20@N_mf5J{n6tMj|ag>c~$Pn#LHbsQV~vg$XT6%v7mmumQ1wa zy4XF$nUDqzI;`A@$!M9IHt3P2u;0a-k{VWcLx_y|0AL86pRhJ+1x&#l=={%f5z>$L_v)T41Ae=P;Lq|a$E`6rC; zc0d917pTOS)nV!$RbJj%a~@^3$f_Q#x2BdV6kG|HbN1{1|3G7>0g>HzSFaj^hht7w zyk@7OvtR4-I%x^JZ0 zOElYSx-}xZcZZ9~8e363(VirGfTOoc03~(5RlWRN^4>=qr0Z&MO_UF*7*x^J2`MP?aIg7ITUEF%1oJ*I=0sG}N1yjA}0OT7rXQWvLF&a`z zRa<*cPpSh^XF7-Y^Gyw{VZp(iVD=p`LrHcOObpUQpU89k0nP#E<}96FAZJ*P83`oB z)10(%Mv0G(MaaV5^WXDUHDQ;aUE99@ zw$v^OFb!V~-ai?>L{4~r^kMp>OZNYoR{s|ntaTdslA`19x7z5(1UjHIR?+m6GG!FcC2nn7*rEd5Nf=1qZkqbniru`)?RY3;~$$Ce0 zTItRwbo))boXU8&U{KljmlZTdL3szK^v_Q^f@T}+WC$K1y=-vvOgU#ay<4I>&uErbhMOY6_ZZc1HMp zg)MK5hfMoY702qS9hDwsU8Lrl$bj+OvOi^I9Fk zTF@^5&*ohXN!=Q#`jIPu&1B-@gR|Ci^)~gg5bekYEt~ 
zzW)oY6mA3&?dn2=dc}j~%#a1#olkXiHbOf+t5&X*6?b?n!~m)}gGEyZtJF>MUl3Jzpp1<&LEuS10wM>6|9KF@)%or{bW5tDAx^M8^ zN@xs}U6r+Fq9#IixROx?PdA|!%6$(ciC0Z2Jv|(Q^%#%$pg!Zz(v3hO1qY{bYqqE^xRo^V^ zkHuAUOBxvP#ChVd79058N#S=7iZ?4}KF%M?v<1?d>Ajb;EP&xWq>i6oT;SA&Y3wJT zH9E;)!kw+Q4AvKwCENA6?!(sI8ctHQCsF!ZC|U_(tp@ z=04@lZv`=KxA9ZH0Eu8xjrDH(TG`rR6AHPHhIC`)Z(XvKZv3)5@OfO+P4U1HnUaYM zi?ka?y=VAF+M$Q#;&bft&ily=86EyF9GvDWA1V6%Y`dZUX?f|LPcNOs0v7lBuH+MO zXs~}o@_ObnskR2eZ%m!HkvN7_M ziJ6fgBAv+BU-b^oXW;(=|6 z`EW^t0-@bn)gPpFk+P=92AUo-^hK5~mV4pc(ZX>^#h>32w$6N`bivRoGbof0QR9!t za0bYZ_-euIKq3Dxeq}m>7u676o(U4ZGECyItQ` z^^o5GG!Yj%7r83vj{4@5hbYBz<*RLlmv$Th*Bg_aPvUMR;=Z-*TP-JNj=1DBcHQ+a z4Mf!{qF2y6>wW=ICh^JtZ-8|DHS@gv_Se_No`9RP9aybNa6H;1aY}cvvCv$bcnRw^3vZ$~`)X zBQjDp5O4NYwb1u{cm=((i+;928JT19_kH(}(m)k9eA)wZ*=*lMQz?3fR*q;ohw8T4 zg{V=^5S!jzC<8lt(_tWjX!|hQyMomj)Eo%K4P*FbaXyRYF6hg)$JmdS%umCJzFmR#Q)`NpuPPN z#&RV29)Y;b5n^-K^e)VTE1h)+AQ#PUw)=Hq$gjIlW{Yk`s4X^%LNGj!oON4Cwm$(+ zJz*S&7bUqxAv;)Pmsk;bi91l_lr&I8gFIhHfzNz*1bf2dC5lB}mdO40Esuk)3xh&t zu~q?gp?LI-A0v<6r486Sx3_c~c$xVkjz+^pa_YFcWg^w2oko&W`nV@QIu13A*`gvp z;a0+a?02Y+jRonevYa&_-HU-Maaw(^s>eQF7a`tPvMZj&slJMj)CP9K0i@@p?RO__ zq6YN2hcSfDLx32y&+nJM8}?2CoZa zLU5k6%V_sEn`pJ@F>3|v(Hz!J%5-V^ghf6FtlR}iR9~J3pV|>g&oP%L))dLU$|?l{ zaEzNFTGR_A+*;?Kl)=?0Q-P$6^wv({_w?6G#evUknrsPiiz=AAL!!&;>g*hks_c6& z^-^m)I;eb?=3VMrsJet|x76Q@QP0$(JY{}dK|*@EcquQV`y!h-8bg;gCxUP9Lq9-9 z)+8rBQFPOK+eTbwNEAsje(cSUpv8AzUHvo)rPnM7*%8A0+UEc<=y7BzdwMS0e zA<`SkU{+>;wX6$xO2nJ%ScBP0ULkXLSlD*8Go||p@A~+?STm(OB72vWO#_%Hzu#8z zUAdfyY)q_wn7Te1Plx({+*W#=J7f46rt5gPj_-N#=5m^ARd`G^wKfc9%1ewXD!$@&d<{~CBSi1yjz)=*Vc2P@E zugR}5A{I`>oTl@;<};6Gx4dziY{FvNll00ibZ6-~;mK`HsK7a7!r^0E1>!(B9OP&+I3@H<4d-iy!$CMP74Qg<(27+ zIIZun^f7axWtSXw68VhU7f$r%vjL)|5>}-YKffoeu)+ZgH&P>#GxLd;m$*10ZOB&F96;GGK0!Y zieQ5sA*&{^_?f;BVH#n5SXX+*aEQJV@kRtxEsJf208i&@xa{9FsON_0*%PxG;bP3X z@}7-o#+lF-+t8^(kP(~gQd3K%yu*`9)$cbzrqFq`9IRMJ;&?f(LE=ff8)Mq<2fAJ! 
ztAwyOk{23o_ZWFNZ>@|Sq0`qy=2iiSUmlNFOu0*84bdp!G^VDN@Dcni;j1S7G7ihy zrti1Ry%Pi8u>c&oJg5)0smKA$}|@H4j~p*E6H1l+d+cIkS_}v?2`hqA-qDQ ztfXRb&#k{GCI^L^ANrDM=nS*zesaCRZWK^(>^ikI`-peMQZO^CR+!D;_8^IjLODSB zm9pVoc5v!*6b{25@{Y4Pq*{E(&+rPC!pC>TR7K`H1yGk*kis$OGD$y?8=;s1uLJF? zASfDE_pCm8;pz-rnU5D^Ga>4@mWl(soB5i@)xWqp#ZX+Aco z?hfZ>d2sr{DuzSzCph&9`hK;Y3`iH2U2L>fsAMe*HPy1}^1u6z#^FLty;rLK;kkd4Y_8`tsCip>J=9UA+5z#y<~uxm1qm$#gCKndx*-AC@%7hq*u-(!+Hlp*GI-CFP2- zPB!9^6Ang&LLv83YBMmj#UveXXRCzx7GM~$7^I_I{UC;9U9nWFjPxEAxe`NxHCoAy zP`~hb>pWd7K4qWp z*9_5OW*~K`zGXK8qgfkQ%Uk_d(>r$jkgo<56s0?FL^?(EM65cf2Yyx{Q@nsBE$^cf z87y{C?9b!(EYxoXL|f496kMhSg)bU?{~Pa+R$k5@`2k#+!i5b0NvaM>3Rjm>liba; z8A+47{i#~(XK=rbEhBt(n8ehnqdRX7&Q7Eqjx6R3`!+$dav+1|6qfRf7LF;sDy4N8&LO^>dgAwvyzC|_4c<< znjBZo02&$lQ!^w#j+c*vC<)2C( zDA>bPIb+7HcZ`9%Vx*Qlkg7H`@KLgV{Ue_rm{XXxU{m;|dB*K_gh{`6pN5DBQ}XPI7XC8`>$i!{muzL!<{ormh@H#66vms$n&`b=Re~GwL(ST zipUuU1}rv8M@7{f13ujadfumvJuKIKvBeS;72POm&ps`6Hu)s|OrVYf%t=T+C_W3q z8FkwZuttZ`zcw$EKR^T1)xkhw2X&1xen zK}ON;sXie|eZLIG*3H?W`<^!Udb>I>23o7Hka{fHKT7^$C-1R~j72_&+R|Wxi1#E9 zejJT=snC2m9S(by=7c`VB-b6(b{fU?;_o(s105N<6h|A4D6_nm5|l>k77is9QRnn9 zge|rOBhAPN672#KTAZ^tEf_s@!@P%og{$9D%{LBZq0=Z$i20LyKX`~cEY5&9;$43~ zK_(kX$~T48%q-r;bjzffo$I!N?(FuVHuYgoHRAfd#{ z=}c2BUI}PV062w|Q2ime?Hej($)~Gd`|(_sQ}LHmR;hQ_rdmoA>KYewL4eU&6}Xl4 zUePQMtUcXV3CRrsyXAQ#7)0;a쯪=679L_F|-+!BX*PCt}Q2tgxJeUYYmvlpj zmKI4a0&6m0--4Pyh`5HV`-by4OvjwOq(ErUSyl2Ff zDQ{pnr!*gE?A5D!x&=|Rd}J)eg$_mj^U%foB`21P6~Fj1kj67pfUH>{-FRK^Z0h+U z9Ww6FYhy;w5$ip4y8z}qlRR<#7K4uNGcKGD&G2;DIF#(r^CN3#8c6kZ#!QEr>@J% zG(UZk@|=kN|)OZm)*=HhY{B zSFf~0um`@K{IBe@V8d8Tr}Nt_hCVGO_>JX=kczsqT8-sI5wNJ+n8=c?5LM!ZX-Yo) z?#1=cT>}s}wt?>dGD{}|ZD03AngcWiWOe|s&?|P@UL@rWP)9_kx|?0LcmU*I(|r#* z%|7VnzxX*)A56V7#8{}L@!!dUVuYAwVu00$;+!n)|ImA89T9zr_z^aEs@!%%alOc@kWOtWCKo3$B)as5JbnfX1)3{0DP}0ydD2NmVrz;{?)lw zY!XW45u$j)5d8Z>f|KWWWDsEtY_E&dF?{38x*El!>O)l!K9eZ>3}Nlh>O%PM3TO;i z*MT#csO!5^$Q^PGkW3k^>du3YHt{uG$&CkkG9zH5YI*Y#oT6>BIuBCiP+~3QtH>N@ 
z(M@`|wloehLsdDkqx$SF7AXl}s(!+NnFip5P)OZS3Cai*VhTwzJK$^{2-I;qV#-0Um50uYs%{%J`6Pc@zSjSPveH65_>4v+ysPhFA z0cop`uGB&Xlsh7xH>AvtIg!6k^^_up$l4vbZ_iQ)rSCUM#c-(NBsG6)f)zk%{3@W| zo+R0xzs9*twC&+JBg5tgPX>2v0``pEe##fOQ{uMKj{oxwb}iqcl2pI-4*SPFY>WV+ z!YW>WwOf~Ya?J|Sl7ov!3G99DYGH@n0B{X=0b@He)&c(8)7!s?e*oXO?ak9px^aJ* zR#9Vp$dzKszk8eua1A$>k0Iu46pfrjg_&dpCEe7GwFCSPQjGA+Mt~G5AKUIxThr0_ zg4};X*M1K3OaW%IECTwl3;N4N*nY4N8 zeb%r&77tD4G4S_Y(%j{;|ql#GJ^MukYR8E zkE?$67pNL3rCbnZS2i-V%;R(aN5IsAnPDB3MY3yBKO_;i^Z4jN_~o@G45qqq?gxDX zE}PH@x5|tCOl+Dc8`$oCQ2F~9f2igewab`A#2p4f5VM-Bz%&QCOpH<-E<-!7a|na` z->5YW6T@|Z8UGqCKD8l>pEYej0FB}<8|&Ydzz{0*cm*leg45wI>Yl0`+{e#PORNyN zr_hI%XpDVI;rWP_5s>q#GG542tZyg#B(@%)Aamok1D=KYlMA_ZP2SJ{0R(z|^9jjn zwM*5huwQr%k}_!0nb0|J`~@&pA3^LuC+Rmn*a>E7^E&OP3A``N6nV2dp8wDDHOTX4 zE`#>-Kj_lf6IB%HkuazyE z`04jf*nbXI00}F)A|?2P$_SKZ98f0!q4Bq1k}Z~FLyi$crUfBTMd{ZWKCkz$h|$Dg z=t5>i-I3C`o5K3uBt=qh7Y!yc)bx#ppF6i&J?CJp&}v%&&a~d&!HIHuDqg3)L?YCO@=6;P;J6@%; zA5YT&vmMLS17|DL}7+t+#hMUgtT?6YRUM~Ez8 zUhlJJb1^X_WP*Jrg2IkO$eD!wbB=8IL-@#__O7VSQ44~?H zv}cS4{S_gKc&%4P7sB+7Twt2s2Kvq z%btS2{WjX0NHZQSRXp8TgE9?lW4MbmY@a(FV!a2Y=`Y63*P`uQXI0N7a^!^5h0}wN zK5OyH=cfi3U&sZ|VS1+%PW4o_Sz8*p=b8=V5lrIhQBy+U0JLX*W-#K#@qMDfVp^wS z^^Tdgu~i#4yZ`#=CiLT*#Xo!T&DJ=(05y@UR(6*+_ZP-EU9Y-ho|B-a!bwkpq!qWiWF^Fs)nh)9@)9T(lLpm)_0S1N~EI#$e3P1A|IW(T=L!wasj$=v4G|HT^ga#H9{#y@{lAu zWEsfp%+kaR*HeU*T1ASBDKXOFWG7pI|DgAvOpZ};qTZwjm}HbRbuH(expxNFFYnZ@2gUs zMJyWeiR>)n#l^CMY6|Pc<`JBbmQ^{-y3_O|rdIQrmU6s~ zEk=b+m0)_c^I#mZgJ6r$i~<8av@CNBO%v%LyhO7MlO2T-i$eoDHE`L?`;rO8Zsj+o zcACGSWj3rPVTdg&PQnj!uSpq0Y`YeTZGfxptE_UWL(~hvU`(DOFcp8HV@gf>UY7Zk zl_{vSQ@qKEDtu0bz0yzJ^kYgUh5*@@d68TF;upLXdZ?W@{UE~0Oz|u?v^|_Lx*QdR zxb3_`kt}lZWX|kUEwy0UjC2SF2A-3CP=bYI|@kJ=ZOC=M)bn#LS8bgF^e zXY-5w$wEZ8&95Chw0`rvn7>vg7czy@;#7wKDKZ|wO%z&BqWv}7R$S_N2UOyBW-R)OhUnBD7|wkvE4 zcY{?aZn@qDne{DaOEfz-e_!UhQkY+6_^et0XH{|Pv=tfFPHgrZeI}Z>zWrp(V&!4& zdBsUz`@OO>@g3N3m)}R8G#aI{=p%;7&2fo8SbF zQ#%hX{y7nE+ao6EvEtjeZWI8#xPXEG0$|g>M`JFamv~UN+5BnL9f=gRhZ^Zl*ge%+ 
zE_ur>!qz;)Gcou1G2q`X#r`)`L;Rw(m+~e&fv)$15@Wz;|@&tgY zN~hWKVUJBO7ZyHz`dYCa1Ej0?#=n=sDsq47m?%QgO@K%L*3yZ#^TcYvIm;SK`t!zh z{$tJvi+_|=puzTsJH3E;y|pjBGz7Ijh=6WdmLB3`C`&?73qQlFrIh$USkrz61y*$o zu$5{{4}jIE7aJ;(yMHiO?{)f)YAgfi63im5G>SDkXa<<;Uee3q$<}ID84Bi(t)c6+ z7tKTUPL^BaRjfmt9mbpHTMo?qu2&&g`^HGA7JpP(Ff%?s9AZL;&{?-rURM8PrI0kF zz_S@KpP)hRuV7o5y|M; z`w2QsphhVQfCL2v6OyWWFS1k#CwF>66}U;8Gc5vYccs? zh~t)JK>O@s*p=TiKLm@&L3O0`Lv_V&N>QZ*^V`$ezSyqDK||rfOf!Uvc$jE#&-etE z=`CR!6G@{3F*`^cGml1*lYhM09WB)Qq48b`wkg99OQkRzCr26S$7tA6avb>8$IYr+ zfo(K~V_`vB$(RIu@a1!m5~BTBZVt{2t$ujtPa@S zXtdrQa*SgCpydc-`{$+b%J#n@Q(WNKYYVDD;TR=P*T+U=U2G~O8_iY_BED=xHuF_p0S2s)y7*PkVj=hBQJUn>!kYjCDgMEt?gw~77;poA zH*RNI6O{|5{FOwa0!+oBNjrV}@vh*u?55jyLD=t`rL$r;>?M*0gFgG?BIrWjaP1h- z_7OmQnQk6x1EZ`Ekp8%*wE(;EllU|?_$#uDb}3v6EY9pK_n3ytn1ZbzQBuRK%w)lb zS6RZv6&A_VB!v0(5U5B$k#WMG-rbxt=dVAlt~l|^4@YOF6W+SOvQ4y&!IXN0u@dGC zJ{G+FnHq8kY?|)-P7|*LzUtdjrubE4q(LM1mp}yhJ=q9L*X?t1p_gT+_KtmrWbe}C zJJbGr_`(chdH7Halm(|V$+4ioN5Lw)?yK=~IDQIi>F+P8uL*X-=>F)sS00+A8XrNH zpF}wOY1UKH5!mR1@xNouB;<^3FTiOjB=@Wa5j|wguKP|G$m(yZ6)E6dAN^)Ssz@)2 zA4|Ib#Rx8X{|))ee3g;H$cu=_S>N}8h*QGl?#Ssf-zHwTUeqPE$_6Qv&l*BK!$m)t zOwuA^_hEYI5i_MENIKg1LE#`h;0aSinaMO5VaY*r+l-$q?57s|NJ`yR`V>e)k5~C| zlRn+EA9G!4e4KAmchW4GeyOyr6Gp{vbw^(F>~*faQLkV`1kQ=a{MDxIK|(=CVf%HG z1i$W45r&$?xIy!gvx9*ID{8rhRs}w1DKd(MXb#;p!&pB*0*)AvG>P_B<%A1PcFzI} zNI@=NaVNv2i+!bXq}yI6WdzUeM^R$%HBIeaMsj4{i%>wxhsWYdu!S&nxTOg>UM0n6 zKuevJLsUK1gv3-@t&Xn_Ijib@J@dpH%};qzd#3G!NncgJaTx@AqV4%Yy~4WBkF+<| z#N;jesUH5EOE}%9^`_UkVB5nO!UIZvaSu%GgUn~WpQ|Y;$3lKOq(`1x+#<2E<|WVRCFw|PH5u5W z-jih}7F6T~Nw6w)_O|VaY!W9cb?5V~!Re9Hr-2=9vdrW=LaS*1Z znavCPUm+tk2AP`MG*FehmqhHF>R_{wTn*MgBv_DA@r(b^Ta3cS5BtFf*kwZL0>7ho z*Dl=J+*|L|*_pOYl|J^jxTNCB#zfVv3S`iTuLn?*@(mym@K4g3I^k78En&!|5#0L= zJzLMFPL+KwrXQi?Qyxhzo;;;xS}ecDtU%aSA*p-MtUrjkJ%mkk(Vj z14T}kH+rhFW0x+14f)x>3`RoW;hT^ta3S^+CEUv0(%}yPu*^lVrk~l5V^1rBh(C4Q zJyGa*%d9y zXgWO_4JjzqXXH6q2Ug4$M%42B`mdt_7;m-tDPS}8*5S{<>#Wu3zm%aDb`maQ-_VbQ 
z@YH&+#W@nu8R14ERdl>f3z}&ig&G&!jdF9WBMB%_rd%~>MaXhCWVd@b>o%}-R z6#}43pnm(@AtC*owhWZN{mckY_BEOCLv4OaN)|dj!7OS}V80vRu=<1+Ei=hU)l_{3 z^j---O$=P1mE4he?^P0*$A&Q1r8pkgNlf?xH z{dy+#t5;KU^{3$hyG)A^`;Tz`!jz$1Uk9kDe+|L(EPY>KT>)p9*@ZGG<8$H5^P@S; zQIRhAl`56PpyXMHv}-XV<6Tlxpdgq{;Q1Si$MGk^H2Ze!2wRBB+SA4kp6~P?6nn;d-WRXoReSl z=SItB#So-o$;LG@eGTYhXyRu?qPKV$Cf$2I7p|vc53y^({6@=+&SfE-+w+(`-Z%8n zf+;(pV2i|BGnJMixw{|51bN(;*KIx1zBXB*@?zS*U_TM5GqfJsdJ-dC!svd=i-%sO zy00wYl?9DFIk-$&>WL6$8<6_XKF7N{YmAF>zPLMK=@uyUI_top!jD>;pyqA4WcmMaxI_W}(D(chC{>UnD^PPe{{9GJLHyszwPCKI1hSHX66P zrG~DWOuhjnSOYt@Qi-i{sB3^25NVQ)sDqwmxz56`D4vsdT)%O!+ zhisL3Fq*M5b~lFC6S>S}*=ZpMd@+n#wQkBgXEIWEw4y5do&|MY5ENDS7Zk&a?9jsu zna3RHF~NSXbaP+-to8Q$L@nLTpEH=Nc9Bn={FfF;f8AN=^7mt^`!fsimEV8ew<$0{ z-yydZom})(tS}6PI=guC!vQ43r(?wrPdxPBiMucJwpF!Zdod?m3?N?(ANHf|U zEQZVjp|}?rwOB`2Lm@u^3ruT7n21gaD)IBMEWWViX9u^jxRx{Ra+W&+;gE9=RbB9@ zG^M4IIKTVhrKdagO$`1g9k0w9rs4MyP2x3f+bfG+kVax%p6iGbTt5v_rCmwOG{pTo zXe~c5ZJS_Wu>}bNEW!gdFmzeiO^dA^z+atN4a@bgy8>>-I~Q+w-i-YW(%RN|yO|ye zwBX^Tyo2fRzh@~Rrdo}D2R+8&O`@GeU{hiAbziy@9$KV~&w6m5D>1TowB>j~?ChyW z2jc^GSzwlASX4EgX+J&K;v3)BbeO3WmaowmltL$uz~fy{NtE#koZtt|OwP`@b8fN< zlwyHr|06Ko6OoIvJ&?Unx^|Zk=SzrOZ$~oK$bq12724sK*nDn!zHyC6RNe?Am7t9* zV9XY?%aC?Evq#P!O63@nB%V*w4czU30zJ*%ukxW{W*=|2E#G*cP1+W({2I6>ivQzf z3{LU~bTKwTabLHwwLCWz{sELgolIeL*70$=J8A*1XD0W5?s6dklRBQ;r)M@I<=izTCuhg7yU%_d^8>C_w)u>TkP#CkG~)G+a!4Uzx0W+fCq<2c{?U=Ikta zc1JhgK>n@Y$Pm{pByLLpLffz44iCB0rV?aI4;N_)v#@~OGk%XBEe6jEuE(j!x}ASf z(%%!=tgXPLJ#al4ww;!6u=?vkFBJ3E7EM=F_G|JhQ)){_AQ=0B@|bmu`G;$4)+~ZP zn(j)+D5d-o=Geb@Sr~=^fwhKt96o4dx|>M1-UjWv3Tp&~v?kpyL`L7Pc>F}S6i*i? 
z=`Wi_l3TrlA4bI!un}!yJR=%MJU7~THZt_~ote-M4-~r_|F`MxUaVx+moI~DvP4Q4 zHdvD$#VSpGp4B6KLh~IAU(Q!IJx6BcVV^0Ej-X~lep=cdWBxV*ZMb49CtVu6$O$|7 zLu>&9%%&D4r#MlQxf0Zy&_2sZ2h-h$h_^@|Zw7-W1o+SQ!VUchHHM%S1RY)?dZRae zz1wuFQ!+L;?vy7_ps4Lb@vDTmEkd61<)hyVeCEup*Q0UPZTiA#D-#$_-!$9v;!887 z;?fF*stH3T?bk5JaLSYOywGdp4$}oaU66 zY6Rp zRtxj_JV5?(Gu(X2&WvG)DrcgO`<#I<$hox>@R6LwH&b(S-uV-81rjv?`keMuQxk#p z&ikGQe%{^VyPCWN3+MLV-!lN8N{CvpV0^KYv|m&hNc`fE!I>YJ^`7 zb}*wwlh3dMzqR~buKnH1-WN#PnJ#KImd@=3%bxf*v;IxN^h+ROY8(Q;>7lVav1JBi z3nx)A=Jo(44Rs54*}CuD9+sh`c3ZCq;;^>7y!p3s@4l(8B0;hqy~);2)u||LjPd2D z!kb}6!j@Jx*SI74X3XPH-OhW_L|*5WO@W_(2mbSSY$3cMt%(ZUwa^DQv=ADDt`zX1 z(PeCccN!^oD1tDntTDY5mrTA+vFcOkQ)%wt@GIhS!=0YUk80Ott-bPCFgjxtJvf%Qi4OJR6`3CNy zC&9DYV>F^K4={hAmEvt{W)hFLy@9bac}cD6ePKxkZe~Ll|8MIfl9ubOt<>0eEIysF7eD>lVeU+y0ae$N$AXDAlY#P$eU9 zXBghf-y;w;?2VPp_P8E0Bw5R56~^OLR_pHzJ?s3?gi{>>8Tn>Lw8Vm0%jTOPzp3&D z{1YJ;Lc%J&Xg;M@T0Pwgs~K=wDmTdG_Q`_`ZD$<${M z<<;$_)lmIoc$`Dew@WwmDs!_UA8~)?QFJUTRJP>PJnovOSsY;c@_C%ZOZTM2YlRUW zGlh#_E>PU!Z0}KMZ{e57a-c(x0B9K!L;F2k-HP_EFZ^_=L76Wu;1LEBHR+;(L1h z7I<`Rv^Y});v7G;)SP)~uGaqO<%eWE6 z6{ureQ`@mW1cE0*-l3DF23M_iDL+z@9N>AF>c`74V96_T);{_2dm(tHVEyr4j_M!U0AwiW`JYGA&9H zHe^dL*+Ns*L~CUU*v0P|E96*kGk9Qqm~(S}uaa=t~{=vdj0mO()S-bwxeFfZS~$DGRQ`PhH`2{l9& z%^DXHcTTUjSoRVRl<(PXBJ8($X&1}rzcAU1!y3&tQj$cR9|36M(R(}NfqI+zf zeH1;VpugwwieFfg3NY*C8ypfqORJ;wKf#Ji>#u{u3(&7$ETrpH{S}ti@dH;Uu$%(x zM(*C(Rp4^>#qxcap)SZgTScrgZYZe!nEOQ*%Wet|){(ff8%~nXj9mSc3@4b_*DDVH zhEIv5-mLMl3V2;hpF?quHR@H#llZvI58P=^ zzi&?31hkRnchin~SggkC{b@`Pb6!>TTHBakyB8|p!dI;;g^dxA!+hRw63O@T`7p7|u2+XsiUkf$aX(SKv>XZ72VQ*{Rei)QOj zri;96Z(z{ofUIZ^QNPtN?D~8aA8si6NSy|_3x@Fly zUib@js0*z`_gBhxAz^;43V2AR3NZEi`EW7{`#Fgg>D%3k6*)L%2^JrrH(cMN@gUsW zeuaAgb2hzB?tKk|K!#twMg98s1*R@`^)uM}`XerwP$ znKs7ag02}9l7o_T$W7@dBu&7Yl|>_X>D_z4^%DsH`bd58@+i619{U(70?n{z{q|?y zZV?j9&KaGwR3q)N>Lh7q{Pf6$U>Seg0c!PK} zhg_~560jTTsn$uTnmiiLu)7A0{~Q5wpu$;fJp{O_y)kCG8w&1T^oB7(O1K*uVE2=4!UV#R2TifKWkWh2;mVWDSFkOV3` zUAd_pyc3T!&xE4)LYgKCRS>p5|2oq6z4ZCP{bBR##3;7jRQ+4=Vr@-aWE)LvP2A8@&1S=5N( 
zrx3DG+@X8W>+xnorkHZ8gu?n^IDkBLI@2f5Y{hY&Ppa&BwBxu4=XRN@!_6VXQ#i z)=QO+Q0!Ez3RLPR4dOzEuEi)ZQq=?E%esFIYGL@MQg-Bm1}&X9XC`UoXEuxTa@;#H za*&g294&o1J-Z$llKi!Js;(UB@%N^clb-#Jx?-{d6SMYDgz?PdnI8n)u4P}eHyaGw zO4h$T{lQsx@QMXVS@RT zq08|c$H`Iy2A}oBujG?~7;-!MV&e|)Hy?hObot{t+0GQ9!*XE3|KNQXBd?F=U8}LT z0oi!VLnE^Rxm$a^nY!NgEF+xEKXH!6Orb$y%?q{PFA%$NK7@kC;`!D>zZklY6wXS1 z!sBo&>1b94$_&F&L_KK$A2uc^(N?xX0xMe`GdQ_j!8~}B%r__d%WER9DZ8<8N z72(ht{(`I$Z6S`)>uu``9y<+gxQv&lsG{x)gj?LAnxZecPy30IQi_* ze**kFUG@>sKC9<3i&5@*zG1A$-H;w7-dKJorMpL>zL2W7nJ90#@-YR?zomBD>+}tp z%{&U%QscC_?E$T@uMo^&C_YfD)gw)_!L#rz!rcAh5|tWpTzvk$u+d3fSF}DbK1G}- zxgwn#Z<)r#e(>$$MFPAHEHF#I)ocW&6TUdE?lOagG%wVcqc(nBkA(%f+a5QMD;SJf zzN8Lv=hHPN5aFjZSkIaDJ>U&VFuewdcgE85M(LCB?a;GEi#Qb+hAHH;z<%N%&@>E? zXg2W!cfUMutT)r-XxD8ix*0W(ZJ6N`6YwCpKliV`^xWX!AN7U)FtjGpB-NAyS?GZe z`-zD6f7db8Fd{3C(g{=fJ>=3yM_DW4cS}gWM@nUm?Pk^9Fe!XP)o2@bybTMSje9H{ z?0ZwjU4p9==S|)(Z}nkXT~E#4UN!lU z2PjmCm}!#X43~!8dGZGsvO^rKX{4|e4@00~^!2Q%I5}}>0-lm2n4Thuq$HYnp5Nv{TJxm6b@tY7e=2w#0da$RBhLC&d^U6x z{&l4XV_h1guVu)oYW3}>ML(ph!Z4_tGy76Ye58r5r0+K&U|%L-H~Mim8PGIhaA8p; zh@gTIB`r$0do#vhaw2vZ;$ZE%d6hvRa(VGfed9=53{AYi(M;W`A>e|GgQ3adG2a!S z3{J-UQ8d-uDb-I5*p)pJ_+_2Z!2w`%X0snDfv_b(*P;Qa;BE>(MUda^stbh3CfaDLs@XK^K{ z6;OV7r9SWL4Qfi-Qfk!lOs9E=py9J2du8zusu4@!{opwl0WzE~R`#A&X2kfG&&|%B zWl`8J(nY_Pk&gZq>r=3d^!D?-39RW(aETjiToD89o(A4ix#U)m(nk{b_b`+rUVKdd z#)0cF!4pd%T#$gxI$x2w#TE&_3RO@I)C3-cJJ%D z@>KtXmQBqf{zdV6+wX&Z{T{oA$Sv|oqW{~@kOZfoH`hs6NZRf47uMUYns_aIn56VZ z?)3w4C5m#ZaRH&v;YhXILK$Y;41-eK>h zeG4CNA=pq_3cXNHgo8)lI{tmDhstQJng`?mCle4}5zChBME5?)YE~ zDnRKhbCzcloSJk4N zxp;j@KEQ(!Y?s4ud4D{K=efsk+-KSpeh>^&*Q;M5kCE4lkAa`Rf-iM!`4C=zU!bga zuS)vOha4!o0)<+SSljtb`0n}Mo*k7G8W@=UDGqjv>^xD`658nb5?T|mID}9bcn1|#`b;!NXU|9m*9+t${RzHOW z?SWWCv{rU~Q(1^Tzg~x(6Pc{|;6GZcRp;FG&v+S=zOO(y`p6uMzrFS$|8ADXWW8u; zmX697Mep8S@pMYEc2hVw4!?Ct~5Nqr^k!}L@2Fm z`>Agp1DKogvKS4|L<-Q{iIs}AtrjVlii?ati*-0i=LG+E zifGlIF=;gC80xg{5o(rkObXU9Pb6Ii(X^zO zk(W}N?;-AAYEBge~d2+jsiV`0G#7ryn^W)7Cyvx2u&{UMc0RH`7vF--D!5Y_D 
zL)}v1`G=y>AI-(93Qr^TYNP@yk#t{lVcWNoT6fp#T?*_%Ru4({oTVv@HYc^{H=4m) zTkkmgcV0ZEoR+=%`O)VlQd}|(;P8AKczjEhsHz&tx(@8AH6L}{?!k(Q7V4$!5?OF_ zASwBhTaHKZxJ`6eY{2PWOaj(SR@B#0(9Z#Q8N5mIcUb5Cf#Qp!|1N@-XV2 zPy^@caCbMD&$m2%9tC6;r6th7`g8)}kotB#uJue58AfLX=CpCWt9_+f!noV)WK?PY z5wAYLgDJ@EvDyNCY@7dgK%8`SQm+P_#fq+Z@k7ldJQK?-9)-x4UKW6L(D&MVxr;>Z*y=pfF-8v>5m?}LeYl;=WP^-udrHMqpFqyMKo-H zZ7gX(!kp)WwI0``?D{PSN2L0V+$h^g5R3Tz9wb6Ki!7^lop^?ovOy>&h&R@(AJB2D zWDnmV7YZ@#@CS_gKco3s|CT;;$}l{0p?7Y1&*GYG)Bi%isK*1Mt^K!HPgdJVPM4eJ zkAD0M{LuftHD$5f=6K zIN5@+7UAUeXd3&N&0;XTnDEVTL-HB3jBHW8@3+WE1Z_p}6C*p8F-}fEwc)%(gXpF2 zsFj};C!yf54zjhLRTeq{+lAMfx2y8#uRT@;;AcNG*@71(oE5i=5BuS{k=9E&1?j4l zFf8&M?ZqNrJ6bBF*OEIdPygLpW6{cy#?3P`qy=)Pk0^SJG|n=qidw!7unH?SO?E$_ z{+#v@*Q8)ST#H;U(eU(*e9DEd7NLnWMwA#r2Kq$?HI;E&;BlsZ&omb!=no|IK3`sT z?Cg?rz>VeG%PDW6(P(nnJbO@yf;Ay+hQdS4p7DO!Gws{&_?*x0lClaTf7IAOVZ4g(3tg6q<ax@G;#A-8|kIuHJIg-W|cd}S#2d4Yq zi_h!&DM3!xU+5u3PWQ@tA3R8i^Rap5A$;!s<>KzU+2e)Y0{q^8$2_#ySp^9HE znnlm5C4d!ND?J22$KYVsk|PW6)+LT9laV^wT2}7tiA5>C{2`k!E4^AE1=Y|Y>`-NsDM69BC~u&?cA?&m?CI+7EMEXd)f20T;sp3h;e zbUETT#hwiOKK2Y94h1Wm!w?6U1ugP zi*geokS(!FQ{yX<)=-{fQuHX|HmKIX$4rDMlEM2{w%d!lfO5Rl^}83ipV8{*QklN! 
zjKiobI|=79ap*t_ZjOJ?`}Y>EPSOA3NZ)C~%$Q;N4WEVSnS1)*vaK}1s$!n7$nysC z^eI?1EE2vKI`d~g$L7h;{A2T&+sYoRA!RZfI*l$H_2jamn_!MuhFd``PdL7W>aq0#-*~E~`dk(({ z9(K8Iu}2)mu|0cl{rR92BBh33mEwvhM@sf;VdV$3oT4~H&gx^8d@{nq&nPTRV^(gv zfTyO1jULgjhGcW7K5c^C9@(R2lIu@p;&15y*NPSW_4EW%e`o$U{YuS`cna&8&BB^w zL~1B#->z&ME?#>;ZQP_9KA04B~DX& z6Z^9vW){xHR)Rx0M-*eA9g_$Ot?=I0IPBLz#AiT)iOyjT`RelrH$VI6pj2z?jUDB> zAHbqh&YhU^qC$6WnWYePG*-YSCEr+wy{T1T2RcF32RZh^W{>k6If3-X^fRNdwlqGT zm2PGjrRTT*=)ci5_*X|2c6^3&H(vaKp4Jk(YzY2oQEa9I*7hR&g07dJI85fK4^+@n z&{Z%zfJ%E%p{$}Zv2y8YVLByMK$PD7)V9kgh-7QG);`cs&NnjpYhVBhWo}p}PPH2; zaQsLJ>S86rgIJ|h+4C&i5QXdn1M&3bZ5W(x@mv`zen>+#{6KCokmk>zmX2jq5q$%m zmSm~TTW9u(35^Wb3()?9Z-|DeLfq5+cJJE#pI;xA)mOToAK!!iUTfxgUgqsnT7b~y zbTQMo^?}TW;%oucXp5FTlVUT2q>f{R0~IOVgnh#gQBo=lV1R*V%GL$iB$LJPd*z{# zMZs?s7pn;#%opcfn*lHEITXU*+8#YNu3L+B|LmfTQ5lhoEnA0b6GF3i<>+xXc~cExP_CApY$ZH-I(CIzsxmEf(sxR6sL}bb z!4+~HLwz39Rd24$Ih<4{w=p~ZG-f{-iX7#w@ANnD_`EWsbs-de{w-Z<#DSze16u;J zj&=M)T)n9^9?lboMgP!_jB?~AV4%wts}Mm+ho_X7NP2~yO$#X0=G{_@d>rLvv9E*Oc5}U+2SL!m*V1D5Z`Y*b{w7COmm7)?*J)2}?W<8H&LLQBqQNz zj_%K8X!*&HqVdEi=Z_=|6A!a)OtG*lh;CysUT{)JnpA=+Ks)rho&eb=tP~YnCrXx> zhzMak zv0S_V87!+wPi8oIkMQptIKY=HfZkwsTH|(VMn%$mNhF#m*Rb8Rr$Zmzbd`!fh-9RX z2q614N#=#0Vx2Nd=eUE`E(Cc^X$eXwJ9*!M{zq_J<`L|m?he!=%u>hAV6v7=tWmt~ zL#AhUKc!5yNY8Hdlh-TU#>k)Q$!kKgi=CDNbMWM&3%tQyuI z9n-U6pV27E5K^No8AuSP2K<-zKUp@ktAUXQZT-=YaQ+`m=!+?X~?`Aja8v9Bfcy zd9)sIC=ujhkyy3h*1XwdUU4)Pv~=TSJLRuFNNx0@z6a^Gpvj{)^uB&4lsP7r(tXbw z0}-M-R#B6LCh94eW`_Z0BiSlZqEhdJVV4%mju(vMZbvDm)Qu+^YTvoZ$T>H{+Ud&Q zJsoyZw1Nux?bkgiEre%twOfA$?$!;gBh;EF>dVr(X?jyn&&i`oKfxMGV zx^PK1pmg^P=aU5V?ODrm|!{@@a4d?9iswel+*HKkGeK7XH20pXGyjI@gj>X75=eB3APF&Z?hODIO zO;oDMJDEnO3^!#-$NBUGT5}l%-4h1FsHirJ{%&|0s*cdW9gUShu!u>#(e6CVns? zn^yHjDr`N~se4+?4mylX(>Lcnyt$o$da#y#LvAn61A@v}e2_~LB&TP!o2%%7nFcEG zB7l|p`I@Dw9~#UCqRB#X{U6YoRI{egls!R-M3kWMw;i}a) zbb(X>ez|OaykjC!FLbNR9z7?P6wtL7b6GE?C(3oliSTAle&Yh<3B$6zSI@taEWpo! 
z(*O>v)gr^!S@%?%i>web967qMy&Ty0ue<=G-+Ym+MhDjC+RJ9L0PAzgm1hruzpfh# zvOM$%GmLQ;8Li}I5)`Fc*<)~NKs9OH?3`zy9y$gi zhOG23!t5f{eAMeIdL%T6`giY03ZLWigYELi@7%ceCqLwLsfF>B$r<+g&MwRq$T0=HhR@<<&hkF=MlgG3h?pKwO!N4mkZ)2mt z@W@YSETv?j9NP6oU_b#9XyRKqi{%L1dO$UasyX~SkIVhs?sDz!3cc=&&29(?*9zn5 zsqSoPqe>Q>n!MbE=lDYLgniO;3R4E@oue?KGGX3vQZEB6Vi$xkZ+O% zR+i7RItO>Rd2Do9llZ>g@yOv+M=OcVzFo}=o#2BwW!sN3Mv57g>W1u^z6bk|Y4mn! zp?9>3&$9oaGW!68L`i+_I&+W>{|*oqIy4LD3L~}H4biyJ`_rI0l#a3<3VRC!$U4^S zNi8g4?rBm{Pwo=cY*<$7eXlE`k2XX*F+jZ{T}~JJ9S=wIYG>L=%>S<}6Z=^(Uj{w9 z_2g0HWCES{DcnTPH0rWZ6u^k4@oV_HwTok+wTS&!4eUAt5~=e>J+*2}d%|nEeVfamXz!3b&O`^hFXJNu4@CEsk+{|G-d591$^fNy{xs z$Nqxz(o>f?e|a(@dv78d7s!@&vBgE>GxoioziYF>cYsiBc;6z*H?AJjv>rJ1c{2|u zoK>#)uYSX|zO3qn)KYZl?%vw?!u{~uXiBX&V3d*%hGZdG#EQvxAvbb13HHEGn?I!h zof!n5LuJ_r73bfr^|C3*Y;jK9KWuD&0RH-#$AswQ*+m%Z5Y3Yb#1lt-or+LmXAQQ6 z%Gd`*8-EqRtmSpul2OSNz>;{nK&;}ORif{uXXAtd6{${_;1}z{pB7r#RPq%)B4E@T z0w#MVFXX(R5x6zh5*&l4WJ!+o)_hMV8AM5I+BVR@FEq?78E%a2sfibn*ks&^sQ8?A+^94-ASci(xGL|asC!f`F;$Q+0)+;FB7L) zY9ndT6A#xB4qkAj@W~kd2+k|aJF?6AH6H$TMz-APSt^%viCJyBoU&hD8=oj8n~-Q*D&_? 
zWt$VSp#LLP*VsYC9O9piQ>x1Zdo_yE^PmJub`vJCy0v*^BJ8eW$MaRQi`<)t9MZSM z&8w7RM@Xj1#efXG_aR{k_=RM>rGe4;uEIEy|+Hkpp0_EC|Fq?N|+kqRgyu&sH+`c6EmP= zg*AzrjM~XJFdK1G5yR*5=MP4P=2dBsSAU8GnPY*!dK)+$nj(qc*1L~q zq4V395fID!^&o>z0O+G&EkM`4MmnuUVqKr?5?c{*F+C^Z+282kb!Cfh<1<=bKn8oE z8sdtgai#NVpWH1Vek>2a)+seEZ6|0f|%lg{XH9BP=>7mkZ|L(Vjjhy>IuQej+4LgaGYgB2Ib zHzf{*=xOClTn>z15!+7|aTkVG!C6~wSh?z}Q=m_BD9K+0i-^9Btte2j)lkG^Gbksn1?pU{XI^AAdPMR@TjWY${%?A;XG2Wjc zenG=fGFJEjp~-}fZ2_{uYAN=W*OKM~5Sd%P!Y!uAPZ2N+SI#7pkkbt=m1x96#@Ig2 z`Q{mmN!IHN-^pha#I7{e@K++`&`(k5>TH$VofXCs+lUDfwB{ph?Iwz#yi%wSZQ<@~ zuEL|Qfn>E=CubW}pyZzOJGzekd~XMiTyMZ%9Pc>+RpL;=1ZXCJE-z=Xp0pGu3X5lm z>J=w1=LUj3Dvw!()B9stb?u|Uc-L{{mW3o$qr>>=crq*!(0^GW%5?QCBOPiI0q0L|F zgo^(ymHz)6EgRiIWjfVV$P0JJ^Em$)I4};k!GS=KjSVNtv#XJL?_@Kl6n4v2yh`10{uSv??E{g47@S`Q(HniVJ?J^$$27Xp+ ztk@Rc2zw&eVPX$p)D*f(h2nS-K14YicNRlt@uHtF zl=$4%Qid2YlJmp(zSaoc)iwiujyOWC#LW;UT>>^28hc7#q;(}^$4Si`)LCUdI|KF%y*Qq?>iKRX3bY>z z@Tmml3(pi|-gJjyJv?p=6m|cRzjK4u+GFpzvc|eM5IPUn{U4s*!Y#`0`~H3_N(@MM zO2^QR5;Al%#L&_u-3^Ki(%s#l3^jDaNGq*$hk!_@iTq3o+H6x&hJY6w4fRz!IEL6R{nsXo6+}ZGI(EamC@IXUQYN-` zCyQIW4=4d*VX5_PmSoVYf6hW-r-cH`!8k(}UczWBB9^u0)!=DNK^{?|LQfJhTQz zjyn0ui{%)}Tj8Ee_za}!&OAC3*By2Qo2HgxEyoY+OY;02eFF9_aK|ahR71A&&qb^p z`uS?>I^nV{sRvx*pG|0<JpPVWa3(-%d4-d>StH_zC=J#FUf!>q7Cv8D?8r6zzseg1 zDV9;2c(|@hF3pBT?uN%$)L;OAjM40J14aMy-4B4P;S&e&>|P%L=%{m;<&G6k(5!vQ z^w@1XPCH5x^US6%f`Ib>L~-*Evhcxz#_%0p1Ur{elk8`$(upP~ql0S})O#8OdUm(c zmi&L^wy3X2s6cT1{!nL4BEJ1mttd&fA%3+IAIhJErx7L2h%CIXKwdjJzFxYvX#$4M z=pLhx_4xs>IZ+p;-!F#aiHrJPeX$$&8ZK3P*QgTtvk`lvx1j6zO2Y27vS`nFMP~ro z-?RO&eyFLdjAEFQel@e+wM03p_7o&%1ZE<9u3FhmXe_;nM1zmgM2ZOFCUud~3E2c# z{5)AbS(`_Tt(6rPEaLCSK9k!+tWKiM!+BZAN~5nzNO}cRH1y!N-;Oqh&r~`3wz=%- zSXkJ`znln>l%+)h?rFkx|C&@W!EZ4uWs>Hz^5o`f zHh$Rh6H)l$+6wc^bxE>ZWqFEPUVYg_J>J}fE&38`f|KX-s!2^u6*!Zqfhf3orY5G; zwDXf2)j;76=wEsL_7Y1e&HaAP`43?Hv^69!y{Ri!N_)B9&R{iaS%Sgo!Kuzg6yB$U z4Rll0KF@#W`DKR{ypFx0cH$kRQqjz;fW=rL`df)ovLN^yKo@NwPpbt!l(YD0E=Mz| zD{}^ep+xrQkc8f7S!AdL{G0KhlewXOQ8a>A_je-WalQc6)rfw>h8@vLDxMb+9EWeX 
z$b#o|GL~`V-3QuX-Xarj1jQ20Y&wr@&+w)%K^{3;w$V%;*S860M}sPLoGP}e9(OarR@a)HRwdFW?Z|qx8hVJBt&?7~Qtf_GazTv3I3>=F7w|HT*FaG2Ovfba^ z0eb%9J0`gFdCKjnG^^O(!}g{DCRT7S{cxP#PEhGuwuq1Q{VtuAq7EkkalG%f|!^JJd{&NBghlMMfyLc6{H9_a{ z@o6scJ8XNu?jqENkA!RQ^wY=QONc&@x{r0 z&4nl(A<3mVM$i=3di(M1(JN?V*sGW0v@6q?-wsPxD9!EPD$rNrla2=QUI&;H4b&vm zCXmnC7if0NL5Qg{N%zXiWuk`J1=rp8c$5MA1K>_m)ERK0#@n}rmC)tlx&iNdGlB;K z#<%MjaESvIP%b&Z`O9#n;g9N^e<^KVrvOTSF)@V_O=VU|D==yI-UX;-DpvgziuyQU z?DSQguvD+UgfK=g<}dGv;D3vbk48#dbriqN3xamE_fgjWCy!q$4@YtF^+b?pGq+m(>JV{9`$3O;^SOgW3l^!~ ztyKa^Y{osJU0ppiCD0yk6v_X_-pVGB0GgFpQD& zTwF}Rqzos4C3aJCJ(S$pY^NkY$UxElL;t+YursN_xPi(O@ZguzOjhCC#fbMk5!Q@? z#~Z-gW>fcGT2c*%XSn>4p~Du%&=nECKzo6%>7yptu94h0t@q5k-j9AzTl2!kFlWzZ zy5)gF@Tc0#K10se9XQyRTs?md>iGf&9XUm7|N9ol^L4?)K}e3XMK-|k$@DKC6VUnZ z=UXG&wHj^AI0%g_;tC@@ym#?dOOk$=i{&^y=W%jt^UNeumoxhWV*6-_-lVrH`RCui zus+5MaL2(S(*B7-vHyG+>AVBAEztoQR)H-Hn_unyf^Fr-=CKB&qZz!%=M($_c6!@- zK=F9u31<3F%eTN`wQqH6H`>gVRJCtnhh=P?9f5UZq}~gP(G|yplG<0DLGYXIhZ}4_ zEB^XDQ$WZ8)1=Lg9g9a5S;UBto=j-5+$v^`;ta~I6x1a*ls0!#&n+Q`kyvH|t7UWZ ziO#Qh!pvWQI&Zu8EC?M}8d5CQ_esxw0r|c9%`RDNOcBFGpJ=uueIw}xty6m%^E(5h zi?OXYJ~G?u$4u>#soiBNR{%ucZ0L%KtWWmnU>5=Q-^UI>U??kM6`0Q+%sV zLoWb+-^kequlNGDwGW`UvGB6&E!V)fj^cWCP%|oRs=w zGK`#1Yjb*Np~HBS$Kr&Go0QH?3*pEZN3>SGO7Cgp^E3m}lp6#wkVT1BUe%dO8CoMe z7@_EX!FiLxikBeX`c-l-Eums-=!xg=XMYxLUn^~`&v(9V39=&-4iOJ}t8X7XJ0oFI6c!YLgaKI=M#upBkzB&w*?RSWb!4uf}J4*QXhx)H* zOp3aYvGA1ZjxhNQUo|&{KChrt`n2TKS{8!VF;x@C4PA(Qy$F@(6$93Ms|V}l;WE`0 zOOwqg<@Y=ZDq#5S&)~DH1xS&`R$dpbC?@bL(Kj9iO>{Uim}_PK(;-#zpl_!G z#>mr+F`8$GEAUTr8vU(Na>r^n#E29w{Ttp7szP-bi4H4oVcz!&b2gEn} zBr~>pExM(gd3CfZOu8M9BFCoU!4Kr=53OONJHnP5S&?jlbqQ%U!gftdwQ;eeg+*d- z`<$txgqdRx4Psc$jfdNhev>9DMl?=6J+s&SyUVzCZofFpK+vR=1r884I|;a$C{|ML zmDLGCan{ML6aa81RYA z8g~Ld-Xg4z$C;>%{d6v)aA5AW>tpHQ9gf0H9|mCr|HUMChLFgenC3|RYWwi~hBoxE z;7N;47bYx45iXrda_Ti%v%0SEIy^eDpVc-$%x>kC(61Q#7>_~kg~J&y@z)J>A$Yst zx<5UNO{TXDozS^{ex77GO`pm64pH0}(ahj0AXGmxtSWadWv~JsCZT6P$yg(R?5)3Y z#x^%EJZ*QT;dL)j*QdUS)!ZsfSktBjttzh;*93*r;x3i7P#ecw5D?HR9CH1<)?wo> 
z$3#cazLybUq-{S9+!0=~H>Q#zQ;p#m-MCL@>zNdiE#xh&`jpe|jtw^;g$Lz5clFuW zRjl;YC~{vve2x%BKFuZbkuSuwYqRE7A~#uLn3;tQ2mZbi~e^l1n0jr^! z@U4aGTwHFx3xBl^v4qJ#qv+EF0$hAmtT;s?L!d}(@WMZM1t9<6uCx5&{7zvuxsTnAx1A>;|Jo2UrhMIy)Wd&<%`*aF z%qpbF{JBWz><)|&UX_^&TS`hp>I6Ba(aW5yn{vp7TM*y1G06CK7jJcu4h5OSh|ULh ze8nK6G;EAItNC#{=$&--Yq#16(S^vuDqsBh^(<2W-Xr}0?*J|2Fd-CGOxN)emFEy|y2svCwNxmY2As!DzBwVGQ zuGZA9h5Tip4ZI=|j<@^7y^LhkDCb==bvnWoaGE?29F7R{bx>Y##h4S2hgX|s$-&28 zA+C61c4(NIjuM)>8U55aIavPPLF_JVKBn#|>BrH$E_*&fit*8FhN73t*RmFX-XlkB;9 zzt{c%Ui<=1U8))CLH21NeJP3%R#St}L{v4vjoV2#d(Y;_KC>$@AO;JV@i0}O?h3w~ z(A`~!$EhGVk&pMSnGd*9zROq2tu($XgT`+MFTnEG9J*@FUDED9GF2~m8Q@}-qzKAl?=f`1FFpb8IDItX^5?3u6zR?eJ=w~zrKXBGro%8%MNxW5 zBydmunLUW5Y))+hRs5O+Y35PZA-B@Q#)gWLlj2{!m(^C{Us)`zFdCkAQayt$< zY-!Q$2IV=e7O_WUEf#Qv4}V4tZw~X3%YH*h(Z4Ag39dDJGJ#}H5OJyC<9t-^_Ggp5 ze&Y|1S7s9=LFTmAv%yY{#fT8>vJmx+Z(>Tnw1y>0i5b40BeHIz>-RnNP;ERSZb996 zFz-f-_X#`$t9DjkWVMhu>gPZuBN#TmDkO=H8$7n40XdH6nFT z&tl?>^w60-tx0By#qZsvUgoypsL>$MC%k>`D1dZeKmF=Y^g3bLYxR6uyGcmRJfRvd z$5P1DeCWusM1tW87K<|0~+oUh$;*VYSla`JKoOi*Z+$L^PiE1f{BxKAh z7$*rPCosnRU=O z>85m|HDT%!4G9Q4(^9@3QRK_a_f9}|;4Iz>={_@yQIBCbDLbT>_O@xWdn9<|>|6c# zq^=K!(L^Ob_o3S=;hQ4&*`w-TPeqTLKd=dP6KjQJy!w?yVr2G8*y$;y2+$n>bJ49u zOPFk_GKm@LS?9O{Q4HuRJ3XNI@eQ`}P(b3s%E2_gL3t^EnopU3Wmtx`5Et&beN+f; z8yyvywX+#Fkj42G&tH7#<8|`)VZ5=6&{#DZ@Cg%qjCsdWgyzCwu>PisJsTJOdd=ec z+_3vI&VKqf)Wh3eFxMD|==CpDB(?b<`OE(&;H_ygkxpV``yBq)?z1_rQNA9JF2`Gv z1d$|F2uKB)Sw%0YC__a-ksFgW_WsoG*1HHjy}I$zyy}bc*&hDB-mli_-LIOU>@6_n z5J5B}Ct)tk6_1*Ayn%!E=5^ijv?`dg8HZXi1TF5*uP^k5zEva}8E*c$neGe&eVXP7 zG|?>d3xbOmsS3=yjBO@pT4+g#9Vl_sr6wtIc*$&&^KEY>%$-ZmBMe`5X^C6RppTk)eK_9T)bkF{Z;TXL)>#G!j~s-63z|G~?qIP^mpSxv zV=hRWZu-lg-z5;H&8g3w*Oy=8c4vezKo@FXNYJf6Or+mrb9y8(RN)6#o)2npf&wT3 zh%8C;hy7WT`lSW@y(gE#N|Hii?&D)s1}YrE+EA)!YNZ3Y#>vaUri?D9SL&;Ap$G>c zsBE1`rUj_^$&{+o1Lo-DQ0JpT> zvm)%BF?-NpYxO8^vo}nzrZOSbS-qE}X!cD81t+uRrz|E^T+v~B`qIk5N1CdtjJH~a z2)3)?ifir6TAEcw^BkiHT5NmnKTiFnAA5(C?dJV(o)4wI@ywp8W_Jz5vxhU9*X?VF 
zBy%7}%XrsKBrJ9PTYU^w8~!-E=W^x^KiVD?YAQ8NcUHwn!@`~=e1FES&|CA*&j)`9#3cJ2}xtS&X=`zo0PE${!HWZy&{Ip{j42yqsDov2}sINFB(@hff^}lD(YW{aMb_Fh7@p4>2<2GT`RA4vIdEwUP8@lU*FPB zV~M;gv3*4DhY*mG%G|HK@EKJc__|N#_HO%Wph&4i*45~T=Iu^(c`~kcABai;!;x}z zb=$T#EmyN8##5>L5+YJEg1yhtPLQIhtN5<#)1LI}`|n2!zFUe-CM)3*vzK6I3fsyw2Kf>Vy+g*`72DR)1sUJVcoeLUHXZ4{n#Q}(ozfSAJ1YD*WBF@ zF6bW`SQ{xbOEMdU7r_5I&m$?v+dm}YX(iAat{op^94Q@?#nb=wiE$Bk`I#7+Q+@43 z)v=#3eEKs<7hB1MQiS%0IAM2YP~)#I77#3u9*k&4y}M*GW#WWecXN885iu)ypvB-I zQD?&^yYU*`cdj2PCWT!F3N;1`O>0847Acb{Ma6e2@r@oimW3s|xN>pQ7iPY0N|@^z zi*Wp?&l@={{>FEF^&)5w7XE#jRZ-k@Q@^4p3*cnqe9;;uA(9U&m$qe z>}J02YX1YPxU85PPcNpZ9c^!{O@Nano;ut zH^U8q>k-{GjpX}}t3ff55>bUPBKY@HXWkXCECj<(q2U?T0kMCB>L;%Q@a7TT;QBoK z6?hJCm>-kE8`7P-y{C~QeD)ueAgUR@UI~-`cd$;OAUTAo?)&k{)pLI|Pm>HmrF<`( z$_%C_9Nl#q6Kia*$MvC{ z@QN_-VjO|5OFa;2SW+vgWsXLzDT7G13^VXw4mrhc$%I8YfuPkCgGtTx5ZXjf;eG5X z?t$RTtDUstD@R6pSDmiibUJ5Vs)ZN|(WTFlD=v9h%!Z2=Sk$>!x|m103ssssq8a-1 zl1*NS@Wp`UIqns)7|?}+Gi`5rK$mR#c5dQPa)+W|uZ;oU0J*|_IHWQv!9;SEI0&BM zo$z3wcVp@YpO+(fNOwBsBlK&50>eLkv)t96J5}~hW>P!xO{rDj}9iX{sxR_PgY*JKcyiuSI+gEzW?+vu_484i#t&o{UGf_>N|obE_KeVygTm+t9try zH|;%R_pAyV2KKhnvSU)lt%ponk33jX@57_omh_mRD-@n%S^NmDO^&BmhaW-)z2|ga zfnN!!hBmjqV)6Pc3xtN%&DZq7f$@(%fnR~B+?l;%SpC5qCnuhYkt;{}Cz~hlv`oh} z2XIU;VT4H}q|H`M#pj)2R#=5*wZ|%vXbcQZm+C*IoE+YS(t_CPs|dL;X2Q7vhr-@e*j3v01WKE@uzwA zZX6*JH`wLQ`KX0@2R_SvoQB6j3D77X@EbWCrMbyx}B zcE(mj&&9gW`!|Xd(;W&YAY&4(4XCLOkmNkEdPk`%_;a2s{d)rpm9!s~-}5D~#r>_? zznJkZ;p|2EuZPk~HM=p^y3+GnguOlIQz2b`x&c{NC|=RXh?K)oN(HeQi{4mX<*c2! 
znHrPt!08P2ClIlp(iEiw^8c(7Jeb~y6X^&hyLoymV8=aCFxU9Fw?9m{il~H~~p4_CnVBMgzJXqCU7EJa)`X zS8aA@_`vSm(6u{#^-{4uYuUAjY2IZ)~Iwjrhv zcuI1aFOVtE+SN(5RGDV;Wt&)O3lcQb8aJX6rFDF92v@q489B7u^=tQKc$UOSp71vX zwU@Ewk7hlsSLu7?q0+Z9c43N=hb@9}$n_|0z_5NEuS`| zc#}h=|7Bb8gw2b>{`53aLecg;%?YDv1$eqOSO@c7){yc%U2a&^~Ot=V~S6$8xA) zEHOWFRm@*+>yeXbxUMh=8xQf$s<$ffALPLt)tCK(Vi_>Wt0FBJ!M)3TR$>td#|L7) zhwHcFanns)nh5fLN<4+)gOm(VR4N;=)27qP9Fl4{uq}mSdM6rX_}<;Koj-PV zr%5(_x3$)^aCRfk*2d}oMK)b87Gy#z1kf3Zk0^`IKIM_Gu+i9%q(0om;k(kaMwgn9 zl1@E7B$%4FCXf=QCEF7_Gedo4ixL>v?>x5Oyh)b#mBz+iOhsBp(=wul(~i2Pm-WTs zQaMWFExD73WPp%9_6qX2>RZ2SH0kp)4GaJgv-xO&>3n_IQ}dD1!gV=??T644!lCxB z0O(?;#`<{*-}y2OL`SAKx7fgNSp7)jd##xfzZY%|)E4x=+OFsz2~jU%nBX6(n|>rp zJDUlBEgyHsZZPtkaa37Vu6Pt=;oN3_PwhV(0CbNl3|6RHafERRy{Lgb$Ca!@@4hl@ zt82xn{5Y4*L+`iIiB&t5)%KqDsM^t5cY zmJ0Dl+3nt$i#ahD!SHjog6E?GrC+qI-_8hZ>fuA(8t5L6I0)8)-Knb--_n(qlIEF1 z=L{-Eor8oho(#rs^nbihmA}~-*7ws+8Ak*oxUw9MxyP~m{)bMeRl2Ih4FKm=Z5H)pB?z~%v z#OMgVLRsYxUt2tqUH{$EnDaDeY%*Pl>jdv?Bs3g6ocjk(DbsV-E3>Jx)$q23qA;v4 z9v@$ZS%;v*Ri>OplGvBjY4NVKS>dLa-!a)twDNfib*jE#Gl8(^TKE zhs(iBw#K(W>Drx~#^nU00od5lD%HlHiBfmQo$wxiZl2W1wD5kR>vDi${(M~;_UYVB z;SgyEd*g@a?yYU7t+HMp$L#iiK@X3yX8PZWrNa7uRfUkz3=lsv(1{@~BIfp3)`y0! 
zp7j(p^}9Ag_{9CdR?E>Z3suNXzV5>KEC(v{r5jkmF<%Rw+FcRET|nTlA!Q|cLQ`U> z=<*y9UUNItehFrZn)N|LkE@O;Qb$7Vt>9^?+^w}Csv6I@O+v}^YUXeWy?nhN1f55- zKm5G^T_{to>JPFY)Ub`}sFHZgz|ku=!xt=9IgGleK42_rO^%!zFuVE#p#`bjrnwaI z_Gp_c0CEmEWuSYN>lwG{j7ba%h%#HEix|SXNS=3ictJ5O3#{dcTJxSUZZK@`hE<4n z3;sIHi(OTJ4F7xk>#IW}8N$>c(LtWuDkJI1UU}KzqQI&={5i?Q*=!zpr<>q_BXu!B zidg$Ev%U3FLuJddl>1MlS^tIXMLoehp2z8am)F9<)W=$h+L0?At>h4NshB9JgJ}O< zluOx8Bq{ACgI(V{!A&xW2@ES9p}+^uz%TBhO;_7o4D z!9TqPiGT~H{r~3RwOCd#@V8=ud*QI5+b3=>-4hnL5aDrxw?9;VCc3*iX(Z!7`;H5% z%6hBt{$ZU!bAus9BnF`$rdX@o?NUF|(mWcgyvZbpTuo%m=CdkcRTdkTmkUiOFNR#9 z57)K_a$ox#MZIbJE60~dJ@>IvV$sa6RMl>QvkoTJ4{buGhdEjB1YR<6K(sbSIw#aP zxzUcqqE#&{@yVj@^4)m$ZrM`slct)gQMJJP2>d-pnEmWfWjYd`n2>Vy8G+Yw1;O|N z&3qrJwE4%$kn?SEkfyw*{WY)GZQXioZl4CFL)YCBoh>Md^IfFjHQ<6iGUkMxX>mmn z37RA|bwJW(&B{fH0M8 z%JyEwsplhQsvBe>qxbwH7+cBlU(5pXYXxK=>#lrDyjP-Gnt49>4s;fUWj|b6E0&E~ zKYwM81?pgx=IiTikt>4eAw3}cfMTB|wkbDHoP+BkJ7&ndteFz8gh(Z4xJY2I49LB;84N@=2)W{~-cOBZ+J)&opfd_??hnGv`dR&tUMCKhixX0xgB%DGWNbgW_J)2%mEhCRQ3PMYfV)8dp=>ETKGCHF2cJnSE!vy6B_>A?cZQOKVN z!iN(BZ1I3!lt7Aj4oVk8>nsN+jRRM(lJrX&i9pL7*b(2VUB^e?JRGAc(CYOSAjrFrdpJSA90hdZVMY|@4iE9eYLSZPc7;&ZvENt2w>}_PW6ed`!AMqSGko@j&A_7 zCN`Ne{CV2UyxFi=s0xyO__*n6(XeAAg-g5k_jf?Cl~^7mKO%f~sk>J{E{y0u$V#o1 zs7Lkrko*>$oYw4?WswssHOSK@@RO%`2aZX%J^$RWs_ZA<1$m_}E;xiDbj-QY!|anW z&f zo&Txc`AuUwDO!r9A@2XBwyWXW`9z;w%^*^~&EPPkg!Vh3Na|AlnFZ5@ybI@RHT}ij zsO*5^U}|0aR)?3V@-8^J<*Z}U#ET&3n4S4>nPm#1xEI5kJx8^$MSn4fa>O2G%t zUqxd0QuJSUs^K=R|Xk({lc?gia$-_()q#u_eYu4bCP^WUr@S z?#t}=3zG2HSW!R5Eq)`7iab(zG^;0t-9Ja~nYq4>qH1w* zS-M^Mu>lmG`~tkui+(jcVqi+=i`P(8$L7?)F*nORnQ|}fo+k{_mh)SkT!>n1FMbe{ z0{EoJNRm}d12cc!Q6ym+mA-2oyWaOLISbS|B1U?ceuVreIrU)AA|=|5J=ZIpaX{Xa z5Ps3gQ52J^eT~dO;kiHLpGGBRn+!#6}YNp$Z__Fh4uH1qOu4*d*ikNS^C*l z;zNQFlZKv{I_sW;8j1X{TN<{s1bAzpHYMSGLQ4y^*?1VEO!$8xcLeMgWw@*2F;{xs z@q}k8FE2iQmT3?G(0Bk7hx=o)3Lfp+b=)fNWhclm(V%K2J~?>EP~PR+?UZ@#bEDw% zfn>`rOy2$K4BnH0UMBElm`ENXCN&<<ytHu0w<_}GONL6f z1}g`PslRR2Af>gslWA=N9=(@#;@Kf*yApvB|J<`C{)NeB^wP>2N)M;orWD@BM{$qr 
z-)~{9Kf)*r?6u8oucIOSqvxxQRr=t;14Eg@_r^xbEBVC%k&zaCEpT&lr4w^dZ0-3& zu?X&i--P#7ubBth>y{8M|M-fqyQ*ywowUDqDE}E`E{wE^D$LuYcqG0A>I26l9|lEkx9v46fYX7po^&|iMyK^F3i zoYiIAHYkEJ)h>VC{5KIVz27cJ4yaH$lrXLLh0!l$?zhC=x|*ilgsnr3%!{=KpGziXcxN@8RB*AeK%=xzU4W^(?f@Cs<|+CxzhZsj#^JmNF)V#;Gj{G-~gj_ z>5CD=1Z2CjZ^+Tf2Mwc2QdUi^6p6O?eb_5>6}6c5rOct+AM$YYkllHHysRr=G_Osh>GWlnwv(glaHQRzn4l6VbgIyB4QNdXKt{cZHSxOf-YJs-Wo&66%|3x^yS9gjMLBcd_*X8K#;?Fj`?gPn|x8q+7 zp6FvHX7(htT8c%nLr{c3TL~j#%3;HLl8yZ15r~Em;BbJ8H1+}7J4qDpSm+6|+x z59MU=u~_3^V=s5wc@?^fg=IT#kMy8ltt!}JW?8vUH1+n|zy9WS>5n z8yrjHUHJ+FT$&4-zf!XvrW_l&06UjcHmUbo)z&cq|0@bo7?82fAdlr9UFz%qblsOO zmmVEI3G_sHd;;Ve;ChcvvHg#4po6sN zxvIcJkE<=R!v<(DnL8LcbWW^v$~|6SUmXq) z2YFd+DgtLy7JnAcv433R%Vy`76KH`7wbP7FcA}4GNS$T_r(TgNFK?dQIZB0;;RfP#1wNs|B2Hbbo6f`@`p>dBRJ>mb z4CB*$f2d+aCE;+g$YQmlOm4r|zqh{nZePBQxZHmAzoEWEbaVe>Jca?(bR}N(wAy|g zCB2w zW8xpzirgc@CK1#tMx4ZLV$8&)oi;KOzWNUBu51e5mACblqnzAvZmzmF?c{ay0$xEM zlwbq6u@*>Frd>$LGv}rEJGva3No>*GTZCFr^};;-dPpt_602~E^9@r0lhdh4H9qHz zIz3d0d+T*QTaFyOLWj6WOBd6RhJH4dioJno>MN(__Zz|17!;YDmkLOm=qOG2rLOuA_orz2OY7J=qYRsd z@<6c*er8TG*t9qo0A~t_|E%B)vDq@bIvz+OK{s1%d)}S;&9%zkfXnq^IqmcbC>k+` zWl2?m-*k}lA^^P!UGf%3G-Sn_5Ccf#z5RYF|Hdu)@QOx*j2olBOP}+7vriZo=L2i} zdT$Be+`<=z8{q(zGb0T!XkCR55BCt|_3H#g^RzNIBX z+^Pybbn&^5j{r0vK2DBSsgbyWGekzK-G5_1??w ze!h5~M_0>qFY9+#K-^UIV93f5Pdl%^@!!7?Ueu})ZA-qU9wxgnZdB08@dTF2GcL{# zc!z))e#6J34q@$vgFx6~Td+{Ih1r9kj?A1)cvF|0TcT|Uwt4%~Wz=U`sntdrpRC#b zg;1r*YG?7S-$l27UyGZ1h^^MLF@Tvkrpxx&KN#dgB0yTDC~f+BM0TA>F)9@0Z+&(X zdtTs2$ToJIgs>yolOwM@h4H|j(hz^ahT5|*!G#3%HHxFz3Yf%`OGj-|bux?~=5F-Q z^Z$UlAh0-W_O>;Gq>2t=pf*<9W3+UC-dXm4f@IC{2q7;xQqCik$*thB{Q=F*@;c-^ zJ2;ZIaKv{~crxepu=LDy7@`c^!$xCvPH|MJB4KYD2mYs!cwv=FzlFR?`Qw zXA*KzF;DzX2-8&G%7Mzm4p%YaXwbTDKov%0Ln%s?4^Gecr%wld)Y@u+5#Q*Uid!J8 z^fcD8g;qHj!=U9JK_u7sWW;BVmsVwI38^y|z#i@=+TLn2hbnQ=X{OwQTEsX*V*VM& zpAm+8%?O~2eX2+&FeZrY3;vtV=1uiVpRX*Z`ndJA0lsBl9D+QJxGe~ZLROH^`Pk#N z1uo*c#E(Wgj+M-{wgh?!DeN;^<&4$(Zk{UbIHT);|69932lm`1VDM+gEQN 
zg7+pdtC`!TX1})3cPFH%etT+s!AiHq{TwD>d1Lq7Em5F+(}9R@1*~j`^)#*=N`6%h z1WnHy8{C;YWx6F;kAGMJ;w{pPBnHZn49{$5WN|&nd zYbXJgv=)*l8FiE6udEMBZ&g=$;h?Y2pu{eST*@)njWx~I=j{xu-ZGL)w05f}_I$B< zuWbG24!(LoxQ`I)AI%IQZ_#s>bC(-?n<}i6sS|pi|a=^Zu9y1W(OE-X^6+ zL8SrOnSAV14&A3no1YP&jsEErRzh@}HH+?-pO-TY6y>GiPjrdJfD#g;fEE^;@H3t}!stt!#{_*R>)_&a?T zc(7uJyXbqg7PX;CG&zqq0K6~Zm4p+f$@TYa8|~EEZBl#QA}Ln#GK6jxLwAyG6Xb7j zD}jg6c2FfmACM177E` z7)vG}afx##7_o8;Cy8|l6Kr;Zz!&`c_Eke%M_KrRGYeA)lgn}0hnm*WM!ns|Av6pJ z*(C;AZNO4yooI zWOk7FjrZ`G`L+ytZ6Xcj&~H;(>R>~M7bmYC~J^qRIZq(+x&!db9^ ziW-uT?|zqkYUm~_L*3X9#z#*rKdjQ!OF3SJZ!L3~5{{DvV&#Yx4+gbF4q{xbS*$kc z8**V&HY$gHKQ&IC&@%UV3)ACa9eIrS=a-J(?gCWUhZuiT z0po6NY^WP$u?0`!51yKXtKO(;7|wA6`^UOO;IpD6F3HCql!EpA7wMb%=hh}oC3Bk( zUl&Ea{a^p8%8TKrBa=g|yAk;KQV;ILK3#7g^T{(f8o@H4k2K~9f^$bWC#DWjHan80 z_86=j^O#RouP2^s*pwD8{*fVk;U}eE%irP^wlQP@nE#Er-VJ+9bb98+j54_xUP@d} zt{t#A*~BQkuo5>+mZ`1*Mq^%bBS2{y3*3+0R^uW!kni$%u}yREa<#)5BxYs&c@8pA zAWeFA5-n1lK}t&G!e-U*Z-;IPxH?~axAhIs?{sh3VN|z{>XRp)EvnjBE7o$Wt5MTk zTCVT+cHJ=}=y15Gn#)yM4}lX)vX_24_aR|PnYHcIi#`qFXq3#Rows6M`-d1?GI*ro z=E;gjv-ICcf7i;ZhdHe&CRJH7ttX*?SE}1&qlV5Xx;zHibpuBR=j#d7~_V# zQFdSI8B@xYME&{W2xR2bkjFfm)pm`Z9L(__DHmf;LU76xcEeHio{6B``tLh;KdFo= z^&4&_?EcJtSw;?|sun~~p-a2Bf8{$UYrUdD^ZBE%!>Dj5TBcHz2TfVznX?R8(6G-=WAf~0Fy{3PI3Ib*T+X$>rM z0%5JC^Nt8lPL^V9S?y}LY+P9BX}wh)dZdkCqVV)mJPyNVMJEGPv~u_whec-ht=f7! 
zKW&RK`-U@F@Q%jjz}+4|OT+NGwfezd24@4YCqZtM9&kaOfD>cQmq|Q*y8I%>6v&sm zad<1|Ga?~L())_p3vJoy#YJ9O)^d5eiI*UWFW~PyUuFx%xR|a(b3=bAD%E$#;S(y2 zuvG@%HZ)oQ>A=vb>M^ap0djLXk(>6rRm&m_&ND8%`i>Vij`<%hY-cxX6d-RHBQ{0i z%}`nNwPRdx&rNNG)m!ErvnNXc?&ph;9yvSHEU@O|qQ`15n=|5|zI>s~bb%KaM2+^S zR5Iu=`TX?C7$o&eSD)pylG7zQSZ%|?s;1q6h`NIjtYv-m5EZvdUi@2`bbzg;0-rY-<_=xkk45&_{^g9VXwkw^t(@bsvbOY=AksF4G8u>zrv&1Q)*H(b zH?^IHl%*~wL)WEa+1y!=uK?-2rxAP%M1ZcET>A2j^ftGA>er_0xzk|d;h7{!?+m8e zMudc`Ys^Tt;d}09!x&;J8t|0ULS?onegV_0FRtl1FFBNvusm8VJj+!4UXljc5mBtVjZ67_DKJD=Tk#!bsQAPc?SLp$i zngNs=x*I_$9lCqy7*aZ<1O$np8v$uSK#&gU9HbkS?hugfhI{aR@4e6O-oJo{v*(<> z_xi52J_V{%X3Zp25k!D*{>CjRR#shlZ|}IhA|F0L$g5>A?+=U@CTr4176mFDBh{a( zA?*4xZM~f?PpzX0L~hOscXm-R-oAH>`%!_Pn%x-L`NF028)J)|hyoI)Y@h$$y0jw= za8kJJeGkPuO#+3y4G=h*@*N`e<+)yHk)FCpixd;i(=NqXM=_agf& z+W4Tej=^X@{mtuq1Qn2+cJX%M7J8Nw2ZjorSm{*URBur;0P>{9bEKTbiECxGQ4>SA zJgYpDRmWz46zb5~8EIcqQr9f}RLtw)Lp_@9BeM|av!5|eEq++ryXsi2wVWY$0vwSl zPtChOO;c?cvq4|}ii*I5K{l!kBIy-M#T$LfKj}tt%4u#`Cx&wEoU0E+1iHIwy=g|I zSrJLKv&#QEPo)wv5;%OMd$V^Jh}HdgF(#4v%jLPL@)j#u%Vv~#rRmpa!=mVs+tS5( z9VEY_NyG?QgWBidpVU7EttHQbZX^+GW)Zrh>;~JlHLcYTM*ogAWuhjKCv_x4{2^%=z53PX_CZ z$5W>0yED&AcfB@84ApMS7!2ZSY+uXg4O7qSscR zPj%BS1n5eE9}6D+Xr@d%$*JKonvAwV{n)}KKlx9g>ESC(E21Q>=bpAfQdQ+n`Kf#< znz!l9Q+hnyQ)QRzY&Dr~hSma)wm0KD7PayTEc?t^u@5{I$*)t8tQuUYhrFzl*Rg6oTtu>RzRbf?9r1qtoic#Hoe-afVOqCC)2A_o%Lff_&M`_g%N*RkFWA1F)0z!xy z9zH{aZhOJ2Arg!dElMZ@j3nioPLUB8%8RYq=eJEUw=y=*oTrff0Nz`XB&e&i^HfPU z0TH;Ngh%vP{BF{L@!He(8vB-95KsL3d(t)UuVL@ZkvCeGufwy$0CAJCI^7rx*WKrF z){)qL{f1W(m&XC@Hm*Fq^&QB#|-SvBrrc~70I$-_SQY=@+&5wRvnI`e)=qF6a2i3j(MHStHd!4|T0v&q=20g2hLX$~~xqr7)OgV?wEaO#f!k(4|`CT`D zN#qKa;6{=+fMjDddfQWQ|8)Vb6llT7yd}IM=~wL3{U)Nyn6{WBFQ4XKI43_Lw$drA zs{_%w9l$R>yBB1~Rz;Nl3Q>hck{9(t?2{s>35UF-gdG+zYaC_Tw2j*w_i3(wmXenW zl!R&IyxMlO?#xeGtEhBV^MMNrOKU5Hm%6C1!+Y4ZI%hV&^SOlaDm{|GeHFVVCFYcR zA|+XcO=iyM`7D$SlIRK}kniEa8b52>)q~VSxX>kY8ef~3Jh;xfXkXv3`ITXW(;HNM z*mOG4@v*INwPV&PmB~GQvOqr<#TO_`O5V_i&h#i78w}HvEO;}T7*W9#X`6aoT3s6( 
zA22rbqJh;sp=OA!Um%q=lJ)CXLs6zaxGJk1R=2wXt2X9y$xL1%rxQ>nyMCmWM#Goo zky>Lz9!c*CmOU1kyjSgo7piY-m>Rg=gk5aqFI^jdwbQ0PIbXju7ILx^6}z?I4_9I% z_=_$d^y7JE+;M*LS#^Aqtr_wGd-AP>hbE(`5FH2zNRj5Ls4acf+GVJO<)}9|Bw6#o zI%ziZ+02H&|L(77GJFyF(bjrIHTz>Ei&UQ9dU8uDb?D zp)TKwjFHx)5_J<7aQ8fzy#tz7eP5oV^?4`bd*=%`nLcwo7vs&A?vMsiC#&itN+8N& z!E|pakp1g+ed1SiC2wEfe9_@qOi=kC5khNuP^}cdeP>71aKagR4a?;17w)z5DKu}A zg$^^;H$`k(*fG-vwLH_sG;*1AzF3cx%>DC8??3FXsTp;5!ZjU^h&hLr?JdCNzaKikN38a zR|F*=xsOJ0pB84Ii)#-A;J%->7EsFpT>_&?jFnBZS&7Nx2yo5F<`he+4rF$ME+?C8W1= zH*zwPgX#I_rRu)@jl$uv>00mVFr_KQCGeAX-i)u;!{EgW7lE!v&pdBMh{B%*6IaiV zg+J3u0_$<7FbDRsu~U+~#NNF57OU);^7rz?G1K@NQ1HF0zU^17se19`FB7{>!^=nX z^TFQQL+xi*FZQVx@RDBZW}AzQgr?3l^EC_oj66zB8=o+o2!?BtX(%IL#V5_r6wdpSwN>F7(aJ;NHMr#sHoUI}>-22uY)ZDG- z%*|VgDNRfZ17Z)wkzBzg;@G;Pp4{rQk^r7?GgfW*KRGr+NqASki6UxQ);w(n(6T8_ zCOFT11i|A6Qq@y4s#fwB?Xgd=#A&W$e+Y^Al@cbkMUXD~nQ&dZpD)gHQqN8|lMA{# zwEYI-I=}n%Al*Te{0cX(qrnaoam4Dw2>Ow3+Mu=gg$VRTs(wRLBHKYilrs;8i}Z>j z@xrM;s9`SVWRlF&FcZlX#J$TDw-tW38u(pf{+Yeh!MImOO7cfK-9IlKy!UP@g_;m7 zPDlAhlcR=pvT(e-J}3QbRHv?yrO0w5*Bo3{XJ{l!YPHEu@UL zt-8rR@`HCSe#`o4+tyF_a=Zc1lQMc`4tc%EPy>&rP-zc#x#4()o>QkY5VzJG)yW<8xSll6hl zvUoA`pO1u7#>EM6rxF0Q2VIQ$`2yB&5Zeaaa#i8h<2?W8RB)ezn|yCCsK@0P1;9mV z4_1Gu5}T_&W(53-vmsXvc)mYwuFH4!ZGK%85XJXk4Xe?3#Kso)bWgq%4vq&~eNH3b zc$~~>{d)?HF*4A23sPq|6aF@;SZHh@JdmS!a$=0wTVJy)xQpSyesKF7s05JKQv5J# zv0@^UgEo#fLO2!uSOKrs!% zY$P(q!Fnp>*MoI*r9WC(3VW{hNucu=Sc-*LYN&h4rGe%7P9{!2*6+^5;rx}}i=Q<` zK5m}9V+#GHU$&l<^}PQH7O0Q?A(FE%Xj8!bn#85c>O5a#GP4uE5+yd+LixI`nE!2a zWSk;roNTFheN$F(+{gObSWFN3*Qp6?*ni5uZO+Qd<&C$MH`=Ht0K znsiuRZoU2nXbCU(zEb?WV$-B@Y!6cym9`lm5#?Y| zb%-3-tP6&o$t}5XkFo;-ImCL)Mk#l}l_h{UO9{h$_ZQP2Un?P5uAbbZ*1pX+qNOd*$lkfZ6iVSO%n+Ry6Z2Cy zVM)HXscxpFURv{!doNZjki6tq3+jNuF=MK8%-KZ0q}&`~R~EbaV1+OpPlS@~4ci0n ztI(E$AoxFB?5LE59bK@GWZ1GX5d;&PFNtAnwnbJmo&l{d8SBqLm}^A1C|OWb)pxLl zJPU~fm%S20Z@Z$`$1tT$3tH?h?ZLl#L^5!nQo_U!GNYtlmhm(VzQh-+E}2v4vgm0Q zgK$Bcx^17p5$|_DvtP-Je{o8Ii-tIu#7V(ton_*;CT1ti#p5{{cV!P-V7&7Q1fOcx 
zQYw>v+eJ8#Xa7FP157>Mi_QE0Sx8HN3ZWO}qAlt3R17k!PE-;<++1!!q9XhH#Q&XC zi^@uWx=kG(kCW80v9rRKH*9Lxr(=JryUO<&>j83s^)`#P|6c9{#(NHdOy3gQ0?Yn9 zWK!NhhA91yg}moyds}ia+ND0&5`>#)q<|fjycLM=slm?FyIY(?28W&93trhvZ*W~6 zpN?n@AAV8HWBD|4_QFvZnYlt+XzHaHi12^$7V~@=7btxEjF14ZT+QDaj}-s``3`{IOK0*#wdgk<< zxla;I?A}35+ytN8G-2jem85j1LFDg=OhpVRFUKH47jyA8C1Gy2bQOUkUWdpHH>(!f#8b z!*`dLr9|1tfI`7_lzHIr3n*I_NEbmJ`oKDht5u1yn|b~<0`NW>&yU-7O}3J@Ghui- zHgzxJU_@TM*J&UF7}Y%RvU9RTM(|tT_jcOk!@3f--w2$D4iyfMC@cj#{V3c*h8=cp7O=LqEIYIB4X-WLwo6t{%<-c5u?xh z@1#xh@1*UF2hV22$u1L;lRIJ9FcCfwE?R=aXZ$%QA}VD)t2E*@7|tEJ-Y)Fhn1=M^iSSBwDRbd*80-%;uPyEXYy2 zrrP};d!XU}T^qK+__00>w$&w9(X8lx?y(!%_TJmR)g8^D!G8`S6&6lx*a{@J{~?NG zO=Qhs%rVhFLPeJ@B#6J<$Z4Wh{`_Hlw7c*VJ7Uk&QE}7q^^UaH4Rl8|R2~KJ2E3D^ zkk@~iJw0knhSePbWzL)IeIG(+(!-wmWywYm1@+piu?jiL1qII^`I>yVlft~?{H0v> zss@O_=h>bC3)mal&Hp4s5((5btpn|P8__asq*)=1;)DJw5r_M|2_#}KmJzl|LX#;B zoXVQaAF#}VHzkp|`~f%+AV|&;OZem82`RgyynJ9zfYfX}8mItV`d%=(vT9+hXtauC z{K8)FOF>K#D4_@&dsR`b{bperO&ih^CpJ(gZy{En!hiNP@Dcx*US@}{zoxNM@7H9234++W5xoLsPt9?s>H$@TD z42QGYCALV#qmC!(IjW1x;k{B9;t>+T%%BvJ7qLuFMskqK@<|+MUC|iRwG&ssokrv9 zUwR=tT!mZ<@hVX(qJ@{R4_}i{$lTs*`4nDw4Py-zy6n8BzlR5 z+g~rd4&=ailT7MZ+2W_*U7SpS#+JJdJ*fR8v6?1gu#8#R=aR3qqF^yNg}dX3NO=&K-9`pPG{p% z>i*az{N(vkMr!%k4cH3g1uM!vmINPiCfL8#XJX;o& zZ2b<3!0A8{Sx*hynKLvK1b#yBQ)FDZ zXzv64@!L|jfVD42PWoX@Q6O z9odrHIvKCGTzhkDlQIs?xgO@oR)9nkne=q<1i=7v_TOWrrZarme0g2x+;*^16A@bf z`59zIT5QMU^%jJRc~lxHTK~6`YRc-pD2r99#1d27jY#t*g@B7=)UxJ!!kF8tYaD zwg+RgQAyU_>iMa&DpDByQKQFCdYZGDRJWL!z@A8G3g1@HyFZGe%7ZvHM$_Scaf|5M z>(E1mc}rJpJ*4c0MFVGz2cyFmLora)4CyXiXAIF#^?q2{E{y26zH@VxjeWDWh3} z>GL1@s#s+*^Lq0YnRdR$N?iIx{Q;S=GGBkvZrAPs8GZ(+cd@#m$JxhK>yIyWO%BcX zN#hUMhi5#O}F9Vma3}ruUNMT;+MC?(1XQL~=KM#`eY%aJZW$^N1Ey zMG!fA*yM7i+QWd-{9Z44e&V36HS~zpd#lI-S-!!in>zzz4 z1awOxX+~k0DfOI+MD`k&8Hl~a=8d{sx=NLy=-e+IDXOoggUKPpJvTgRaw_#@o$p9W z>7Te$?uPF)lu<1NTA0mzHcnGy9ucniL-jP--r55{l>y!r@#kmBBv{09Eg&|L3t7yK ze?k^lLE9dV|7_(~5vW`cfeNyXRcrsZ`7s(Xk@1btv!cPDjD*U6vn~W8pxG=B5%nJQ 
zB$e^fA7dC{aCdVehJimFR1S5Uk0E~s%9_b4SfieHZs>rmJfS-hsKdo|i#l#I_ER^< zP=PhxmBwp88q*{*G-@xZgi)(WvNV5VbuB9$k;pB(_S2CQ7So_IdTV*@`Xsw+5>Tbd-re}mu_Uq>3P4vLkjE%2fr+0J| zVv@~xWQo84i4iU<&ByHKDO$U1 zAh~>S@HiP-aZ2a3ZgF;=ZVy?q#4@j5P!N!2dXc7i53w5f@ZYzd=isHk`Px+LaQW%1 zp5|r#m;!od++?Hnw#ZH(<4*u=#YdEoOhQA*@#l4o08wS7C@%mLh5I;6lgK7u%6xP5(!L$#bi`m6?idnukHg&d{hwac$?*`8O|F0XUK*|#h^pUjo$&{h_F z1usNgGjG8?s!bgg*oOFPmMsn0sH*MITiel=TKAu!^Y*@G@93!NcJA_~fSi4cv^4w) zwR(aycg4ZwvR-n;Wcxvymno0|!qwe0kU%J_0m)gncvuA!C8l-0)S4rdbsbs2z0|=8 z6)HYeCgsKaBdl~yfT>&WboQua@(5#^-8yhvqpH!vv9in(=p@-Jimj@G1zv)VRx?j6 zRO{F#%nU;MSF<4UEy)75Hjus#Ks|f!AGgV?PAoF9G2(GC@_ps3AF^z#^z|Z7#?9or zt2^I|?B71Xh5P(P+O7vVO_Cbs;*xih&u2Nuzzjqfz1nye>ZR7&e_{#UPB?GZHAzJ5 z0XiH2QwJiKqiE!m|LHX?*c2iwn zZTL&;MBi|e;=UxA2$fQ{R9Gn%v)Y(b`^z%B@P{?AvK1EQ4l+t4Nm?D%fQ0k}TWBm_ zB>zCp=&WTYRhJQPpMveg*$$|T0#hJm3gU#sE{H8#~`SnW@YDo?;a-=LOdPc zIskp6Tlt)S3&@Hni1%bWoj3nn7=wqVXI zR$w*HgTGfBq9rnMjA7IMu-A&^b%qHdc|gQ4X7v`|qSugz^*0|?u!ALnMv_n{oE^9y zr#Rue>f_Rr$BVVIJCkr~@Z(ZsVHn=gVlyrX{hAHOzyP9#aOO#)Vb4xj{sG_=6x{r`UM!}w!<~o%&J=-U zSoR|@{-+nD0M4`j=Rr64#})qqSH`mgUu~U_VSn*m@6MdQa@KSwFadDbB`l!9W$t}M zP^aqdEclChb3|VS&x!&=@5_G1O5jdri4uq{xVJOJ>73$&k_B)nz^;q4^porbJr)80OP5{PpWu zb8=6;K1pvsn%c(ZpYIZ~SB@_D;(xStXjiSBp-8Yx5n6^?Y(v*&(=T^UJtD|I>rds7f2)qNe!-Z1Lu!$$<8|Jc{Dk>0x25&cz7K* zw{^Uc@#^W$XGyAC-xxQP%Q$KGV@FhGU{U!>kKVRe&FkA%T{>13s++T0>l5A8IrT0+ z7uTDAvkh-D2sl1o@h6`(J;P9t(7!ADV0AgrXek;y`oFhF7NaJDL3&j#`qS$d`^SYw zI+1=U^T}KFF%jIBB7z2!ANm%;<5xa>Mkn>ItW@fer-ZV}1r^7Bnq(t(mU^f-shw>j zffR_7gX5)(;3%d%`c+y&I<1gO5=y!!&cMZv5b(JzRUmDa_a(z#k{Bj8W>qJ-7sUD4 z0j}8YQag?@$M@5d8!;h|9;}d<3zK0MYP-gLW6Ea|^B>zxI&xh6S_G9fzrX1_c)3Xp zbTt*8j<|NAn`HMlJhYjL5GiHc+>q9{ef4htuxK8s6L?#&~G(@!T(Hpd2NN zD?6473#il#ZTCPT_pz-nIBJb>?y$|Y)&F+Ww?iLdM--De6{`CHN!P2CMrQu_nooM_ zrCbpxd?;%*I{AdkBiJJ;Hc&540ycNbho3=@6uDYl7@W>Z#gwN^A_dcFvZa#mt?AFb zB|jE(b0T+*f~7irMSy1lXxgbjEWrl)CGej^!r3jW{=n2W>o7DWo{O@hi0h;`keHSsZ+~LLB zHl4E%;doRQQZv@@U$@t-FP{fDv9M_41DDJ`CioNAZ>6e@{sC5I$iHLS4VqedC7*UM 
zL;(1J8g75YGeLs$nKDnY)%Q{5DSUT-MHW@!Bc690wEW3dN2bQ}Il42xPb8y%O-MeE&bMN)84+ z{n_3Yh~MhEut4>>Bj$6v2S5cFy!b&9CkBLT{I1eu`S0(xuBf{mM=oW{0=ZWO>@$7? zJ9)p7^Y%~L%f4?*#AJzg!PN+CK)P~dPjv~`%2Jtgo>g`F`+aAH!D}Xmo`#vYIl?a- zMqy9lhhrbgrVGhVfuPj|fV6`3!0+b!AMLX59%N2h@4j)52Q2s=b{*WkTWL&!C^DCD ze~yP8y#z7nvzYG|#s*!@{0{T9*C8J-5>V?YZ{158ZOai|&4=|zt77}SUBAdyvVS)3sKt|Fz_9Pb@Vm{d zqddQO5v;wm_TATXJi(K)?kInG1tx>$LR0{(T{eE_a*memVEKX8w+C_inT^6POr9hZ z1M5{qLl^#^S4R@JKuE*?Po%ivN1-Ztgr4CL(4mL@kqzr32ku9fFL!xcQhD;1VqLeV z%jUt;T9xrP3EOX~ildSk6N9Aa*nXDC{_5w^U1~tjLH}JqiltxKDPwZ-*OL(3385DI zh11W~`hZS)LyA&PCx9iA3nxr|eW)$j_z_TjyQ369hH2oYz%u$1?Px}yKWD6|9t%{X z>G`!8Htql_L^d%XOa#F)jV}qy?A3$;M-Y0P-Pu;iPp$BkFRWC0=|6-GPh3%v?`Z4u zwrK9Mv@SL6)iRqoK#i<_+A|Pn{#m|bI1!DXzpG*Q<3ca}@&9&+M}!}y1Y12>8#x%aKic&3cYuN;2=FVG7-OJj*N0{w8^si zlS@I-A~sKXY8IOHA&#AP+j3dI1G~}*W}7MbX`7EyY;E%Wv#8-Few68xtcWpjQrSBS zhKYVXdiRMkLXwuR!DEhe98D8I&qNV{6y}r)YaTIV<6HX?@dLvlc z@zYp)c`4Vq&ajeTd^IvK>ATyZ^zxqpP;B#8SehOv%R9fL9y5_p$d|%MGWQnWPuPLt zktkb-xJ1L}56%m&AaNXLGF>AQlXpe~r+F4QRgL*Sa#pSWW{-UKM=kq+)+Ocqn7e^} z#HrEJkkIt4Kpp`E`r?X@`8rk`@%cER52nc*c9_v7Jzjk#J zw@G#!H-ANOeJ_3s2O*K;{!jnOJm;apFQeyM0}>$mcfWpYgv>h74dPhbRl5vX!e~8k zvMXC}fsXeD`KY$Bybn-`V;=T+fGj<$^ag=BnfCuJNEit04g4_KF*JN=>7~QxpWA=< z$2b%~Ewk#%@$(RlDJg|O7HZo{*%||8+tNFWohWO6+a-YXZAtE_PO~3!@D%x9p-@6e zbB%Xz)ASqROgto<3EYNF1;k)dKwmUqU>k@|Qux?sVDEaCPZN*YX$QY$N~|*`1$;=$ zf|IysHkw_7f7hHFd|uo2XAuE*6lS`el}HjYl&SwaVP@B|MC?Y~uYrC=5bW9Je)nUi z55rgch^9{hxc;3SuM0iP@>M6m010F5S?g5(d+)Px?HR$Wu1_W6? z+ZOq&b$HE(o_#_#c3%Db+huW`A`F3438b0asSKn}0_)$58h*Pc=D!UuL(Bkqt47z@2`=Gr1Y3pFYA-Rm5H^i3t9+kvj%$Xnhq7z7#AKD#6M(`|M7eT#vgLWc z552uPGoG1W3IX~+Bw5J5z$VH7$A9qQt8DT5s*i=r3>u80Yg|7g9OlC3uzX){Nkzpu zOH0#drk^te5E;OL@v2zCK?NA+GCXJHRsG6KhoVtB;u`lNT%Y4OhtV)jG8`No`Z#A? 
zpw7#%yvD#kUe@e%Go+fh&w&R4;Wy8o7ts{5Lk-8(h8ONQSWz7{|lcf3&Ps|*U zT#_%cbmX&@?A0!%UO!2l^Z3O`JJ6C?ofTlEF&FP#ACD39=bi1t%j)Sm{4e6MJ$=Fu z0C;k(!PxyCN05bq$XR_^`hr(cG2{m| z{n!=pRnqJ5i{$le^r`}~J&)NZ%d|i}M8Soxmz}-(viO;rvXiGLx}i8g=FWmG@y`8~ z)ud(n?P?ATSVra|4p>My(*PZsF^80wsD+9~?I5w;VRI~xOw30}uht=I`n#>|>OUr+ zEFMmTsI2C5yU7nK;x{B{n0T)m-FNP(Rgq*oKwa=L@jX|NFewz^r!eM_;N@FI$^&Ql zukwLW)k2qp3aG`mL`mPYfF@L%UJ6D1TPhnrZD3~GelGxcpv};l=K&`(4(`+g%+v5h zNFdG3x2ChlHHw>9*XMLjJE=RCUmi+-vjQ=_VD7q`lRdC&`~BdGCTBVFmk<$aOr!w8 z8c$6E;5}LBH9Bv}!`SX8zfdlbkzTXHW*{@D|w#sRa`F?^7iWLPLwhksg=IYKL zanr<(_$f!Er)eoA(&?(%US19yx=UUD$8yzqpOspIFD|#r1jf{wz1&v&5^*y#Gj)J+ zedR2%8=}q6iAZ=N0JuS%ZclB@)N;La-wwTpxl2VrHs+ffH%t>&8>`h_b8QbxT#R4i zJO)l9q3WtU<$4m|(^U;h=_!+dPFKNh{I2N*OEXkHPPwm{R6cpiza)PKf%eg!GB?jd zb!DY6)LYP(mBZ-Q4^LC`+VwwrNE}pa7uPA*=ysF%58vfK0wC)w$!08g3m4^;#MUz* zeeMl2BPU%ghTF*SZxkJ7>DRtS+c@QZ`>RyZ3TfK}$aidpZ-+Jyj~wUscTHcH*?{tp z>A7aX9BQR2*v=N;3`*u?9j2ZmST-v~&5T7SeGePGio3r&gj2Z?#v^te63E~}=egD2G{$THs zJ@@(`85CrGb-HuD5+lz2_KzaJ%`a>Ky!d0eGeQE8XA;u{9ZlyN+%_i4vZR$E9C|^6 zX@X|IziE9E_Y-Sy-wDr_2(f{rwY9Zj6GHwi-ega>v~1AlNqyx40ySBAgKzZqU)hla zX~;F}BgVS=B~p$w0vv_8y}z{GkrcB0&^T$}-UJ%rEFer=>rTST;~eklvxBl}z5?X? 
zDa#B!keF>N=1>+Em1yOh-F9HnR9KT>@2Pd=oK_|3{e1F*AA9QpmA5)+LO~?y;rN?i zumIUHdmmrkwICEAPY&X#jBBU`}!|LqcQ1uEU>=ObIZP`C8jr^TtW*X^$Y3O+6A) zXOqMICwtU`-AY(u3jmU30*k3__vvnJ^J+I6#-;4F{B*hn?e;NP+{v zpL`0{jg8ws`9SFHvepMyl7)Gl?UoQmQ~RDn(RJ(n?!=-gUIgs5Ts;OBtHXP*`D+|@ zL-<_Fd;JzE+k1lMAVVbjpEzcD!Z5>o9$>)_+>3oIkgl`ki)=^HswZ=acg936M3xNJ zSfB)n3JaXZ`uF(byEhN1?Qdl17zIejC4VEeM)$9RH;!zxXn@^U8*QkBuMcIl&f7eM zX(ny@0uMK$YFiRf6VWWBCT7M8s8q?Yfp6Ha6&NHr9^yRPONN;Oy@I8kZDHJ#qFUle z8>N%wFaSYw;3-32oiMn=%RMo(uM&rMwmO%cXWmReyB81I^JaA|;`|{ct3Html3-)= zQPV`cGeRcPZkdaBm6VTzm`Kitkd~vX?k#E0s#t%O6vgHwkEuTwZdg}E+OzEp`gY@> zpR{k_huafb7-~_=1kj@AiVp_wL4Hw5o~6<`**nWy7)>&ioyHr1+8cHWNZh zMBY_u(Bw%N^WqCc8PGZw_3PmaCa@SBj{rfVK^?}5>}AOb9-03< zZ13r^T4^2SyZ2AM%XWVIG}anS2T*FeFD^y}z6LBjoC%3B{q0u6pgD|40YTFjs6OT6 zh6M0^LqPnyPgq1FE~Yw)cwzK8u%!yQ;CcH(?$%J$K|Wk$k{oBpoQ?4^2O)uO*;}kD zBdf#N=UvTLh1zpuyN;Dv^1}eePsRHsJ?c;edRFm%ulQ6n@)M6ZVjP4n|Aq45oI#pW zw%7YL{s^N2lzZZ7G#z?HIKe7MyBVC*UZxv)5@%)rqNgMm|KYfT9~1F|j_C6_lN68d zjZm>r%cHHKk`4et^F4?hposuKVQ#{(``<9SVJ#;9)76i~1`Vz_3%*w+kmSQr8S15O zYQJBm8hG4ENc!LkcGo8>2V_5}A(;^{HyIK(sx0zOSw&`7DKmDwGON4Wo0?|8&`Lvg zytNn87WHhu*v4Y;C2Set_~HeZJfW2{3n^_vF&XUcZG&4Kc5-ju0BHZkKHL5)5~!t_ z*I@g3)ZeP&)PNF}gVP;TCOw*qMWKS{t)QCpm;lgS99~z!$caK#0rxFEQCOHn#R;8d zTdJYrk}MzF?w(6_R`6O2{Q^lORf$zs|3{V{rLDLoadxtIJV=!FyI}wUUoMv^!(itTw# z;Nf80nO$`CJ^YTMqq>z;igOc%^Hlj4Mg2SUvn**NkR^~ocfJ4G)3R5(++M#MNE3Ww z))hJWXq)^i%r?S5fl0;maJ4tpn+)w3ATAfi#g@0dci)-LBg~GYlfl&0AQRU3rpYq+ zJre$mn2{Dkb5Sbp$@z_-NCkWZTgK^;DSN_ap!(tmVCiea3uCQZHpCH=(MI$2^^z`~ zpwRsDESxuM3~XUk)A@`FOHR5|CIjPl#f~Y7=6Q^HY$T6#rxq^+fe?>(XNTTb(E8g^z@bvz|?tJ zDF!|}$p&d{UML4Af(M+?W>{KxmRNGLGhiUbuhzfLp47qM~Nmi`q`dLsH~)w0{b8AmhwBmhF#^|^dJ+`k;yS6VCwQ}YNx z@ku9J6Sh2Dn00r@n?HHXdkB0EI!PxhEpTsduN-IK>)gP1`Az4}Ok0gT_9uQq6?SBL zJh&lk1S7H(umkbckT1J`Xl^FqdOVO(q#3?-3SX1)0G+d) zNiGuzV-JaQvAEFK*w|08pH&}&Hi3H}vOis9(OYf{w9rgY!REY7!|g!9K@~OCW{JCL zqoxnF5`1g z+3RpSZ6HhUVE8gDBdxXjm*Z7V&+6>!y{zZT0V&Y&>a zfXE;o+~7_#fc*{-#Ias_>{5J-C15vhF?c&ei{M!V3?5COt-?A$4Ro0MOQg#)*QO}R 
zrt`bmSR5U~8DwRx{?ZN2OjR^+i`-m=`rWRKsl-yHw5BAvO96X5?dI4+API7Q0EoP3 zVpc)L10Ge~csj|(W_oe@?Gai2ofyc?GFhhUz&e9^51;P7JXkVsZ#IdU_qm{Yl;W*% zP;6WK{%)=AD#++~W?}-+zDwxvgjrJjLt=&k?lh zuO`^G{8%C~FeaZR5z`@g4COb_RF+)oI9P+}Wgq0qWANM1Kizyh)A^#iG(qeQ&s4ap zA(-gJe)IajR;@Foz&L@(ptTYNRVY`uDDDIGdxM{)Ibj5c6HD8?>Z1&nf#@HqRQ4!E z-Cj)A)p#+@#yAv~fQVChFothnGXa5~n(WNgYI#hEMC4h#mtwoH`U!NqRC3JzlV>qd&wr?BLR8-0shKM90=C)A?tNH0a!#?|NuDVoMIs zLfkUb42I=Bd%n)sPELPJJ9tw6Rk6&)S(jp=pPj4A8&U47Km zmB-=)gN7=?0I}|tZ3ldhurw1BT(EDJrl`pT7iMc=ge1E%C;D!;b>2US)txrBg!e6QjW7~ zg<1IvzwApv^9U1#v%GC3*QDO8c+w|;SDJI(kflcJBCZo$0Drh{L||J}Z%*l`rl|-@ zWMJO=h~N4V9wkUIC4jUegzmGE;VetilMt7@Hui{syo{asDa`2UU>Jt}^pu!I#lzNh z6~L#uy6C}tbkTSRMpla}3-yBJ!u&pqXWm@lsa$8h`PUcpDcq{|_Q#G=tJwvqgsOw0 zShL>?8!KUeuBk)Tf%@zXTsS20c%Qap0`gs5YQv{iK@HNh(28=6(gHGPM!`xBrrvN}W7v+cI1Sux4_&Cb3k) zzdt_-0w^HHBx7&g88lOMAyWCPGT)Y12twrR5bto1btB+#Ih;x$hw)oJ+b98jV~m+W ztT2mG#wPLcOpYl{d~Z0^=Z>t=g|42;&W}H17h+jwJ$Bg}h9of0Q-uN`u%2%DTAflO ze4k^&N8Tp^eqS&rN7@4V3`LHYbD}M;@ex;HIh62lTOtA?LZyRf^vc74@dUq{gVp(k zI^RDJlqqK)ET9!hjW7PnjyiOpMkR!Jm=z-#n#?<61v?Fgg2D=dBZJGBP(Yj{o{L{i8Rw8b1lpB2TysLa=`ZtgP&+e zN{&`s5Ncz~Qm`&#T02YXm>bGN>oI27stdFC-5w%$E;LMy6JbI>YbkfRZW-+&!u^4k zE$M9#4i4W@TzM?8Qv*3^5XWceY+GLf_-#T*x~5s}&T5crIDM~E~2K1I9M zaaKacFHOqXx(P`X1V0?kYTw)C*giE3@yGT4(wECy{&+aV1&#N!?C_7Cc?~EsHrW$q zw2U_%cr2$0db0nj8w&9m?f#}!DAN(Fw~XU+0(Lsc+1cQv3GWF%ecUVDea2AziPxjI zm2l?A#~;Pn%fBeu@dUZiipfn4fo9l`+k4Co$Sw2xD2H;pqs6NSRC71D>yI5fS=CO( z$#eh{B%}0=<$>h#7qhC>PnW(Jc31Bn-^P2~SmULCbP{Zv2R=iJlKAHQKB7P?(P|GN zyI})~k&#h>QS;0Bck7g^11ZE7{Yg?QT~VWeHOJ?4$_ikGu+H;6N3uj-9d8CcrhEy+ zo#A@pobkzF#7w50;b1*>1<6nl0`T3bJt9qHlhAdU5FM~*=m(ynj*c?rssA32Vcm7U zzUI7U6E@y2?EVaIzxK1S*LG8t=wemDrczl?Kk0FyP5*-TOuU9#Q$yKrkOjx;%#+I2 zk`@<^^L0)mGAc@n$t;$AbZXAs?*)NpHIus#%RSFTSJUea9Mp8uEn{%$+U6fbP*l3h zzJasfe+4F3bclP0pCm``{t&G4`ZCdmVxHgwlZj=2w5HnSQ)Jzqx()7b-cuLKupgh03 zTRqB;r#mxd=le@K^)7iy;B9|gH#$_ny-bW#V;|`1LZe12+9O(*Lr3Jm(939`qTcad zPI+5{7I3Iw)R)!W_e*1gUH$aFgFGyi+e`gR_N9#3yrODzIL$o8Dsq-w*)Ai$FGp}S 
zIW58Hb(VeB$Oz>ODa6G9U^S;f%}i-?#IEAgx^ppPd3kKQYFvH~z!=cinOZSm9cPtq zSTn#68rJNyo*w?PyC3fWvbV%QjKuA4Kp8owQ1hHo zwn=x(59Arw5T6LT+9PYxOu(ekrj|}5Cep2f@M8WUyU#2A@fFM*u5UeO?C}waKi>P z$=Ki50I9Zu4wk^OuJ&)ep;pr#8Kn?oJgoS>sAESLwD{&gd>oXt62a++3JVCu$DF^UXge&uf8NQAM zvZ1VOO`efz99^|b{`IP)5Rq3rPJ|{EUdfas{7ZPam)Lwc^KbsFdebMqfSx^GfBzmh z^d%0i$GlFEecZElbZl&FC9VBahv4o?r1=lq&YCc-JOmC)tZLgdJFs!WND4HX6dF$h z-Go8s4Vz?*$@~msc`-4U-32%+zHIh$sfQ<>CZ-y-E8@M9*5!e0_m|`eobBQ%k?Bwv zy7>-h311LRCgkSPw=ES)1Y^UhPd<lN+ME{$SvGT{~CyD(jzV6Ml`-nLP zO#vghjh@e)7v>U8C)yCRW*p{QeUTfLc{t3*G30S zIK*ZCtD|NLKZ+dV9B`urF-j~;6yC9VS6#wx-mAa{COoFA4X@X*;T(`Ihnp#lro%U4 zhXzH9$T67@RXt`0?M>co0ZY9sH=dFXWPlBQ2U=94YPIbe28#}y%b__gi`wHHHHkbm zDQ6FzX8PHEuBzZ`XOd6uqq5;yid|(zgz7Ge=f#8Bs{1*kVpU;z;6)cB^n<&ke%*>` zD!cn%lLT{zmWtjW!B9Ua<4coz)fVErKN@gI0qdFnTU)kSr;964@<53;kL-m zg*wJG|4A%W9CO2;UGk=r*>SD)pmQ*+7BR;220L1|=l*)RQ882zE2tTl-2VQNW4P8% z_x~=fNb3DYtiFGC?-z_gF*@PQ!)vlH_jFCNs$>=Je=frMS|!90YI=?jsGvn)qcltY zOPvrnf}s5HFnoF{h^!-uc~Ec+Mx>ugM(v~7*M$n*bkW%z0px%<$JC2v0XeP=GBwZa zr(kiUfgHt94&yDjna6fEG4fl^sY{hm%|~@A*(|)bJKxYs?hEBYpu}gDYu^V2Id^Gt zyQ6{}wYEvQFzU0j3pHDj>V{AN&pG@M0*5BKZOF8h5TCW4Bw4lYANU-qjo2QMg4h-{ zF6S50`so0&65=F;vK4k2hM)Y7favEl?wc5?tsGb0Go3YUHHZ%t+QR^$_xN$6gRI?d z@zNMx!)xwb94Fx&n@jTfaPR^qNTpr-ka23?1AhT>=_8pS^g`DO@v&= z;Sif74X&@S#J+pxlE=M=91$wqL`_O<-yJ{8y)>Z`J=Dm3u!Y+a($mHY%7^~zFdoUN zEvr5~?*^cjfZUlJB*A7}FCEOhSBy zrJ{@H9DiwK5G~`WefmSph3QY26@7=-GO#N*WJSAC*N2FD&C<6q66$nuqI|FQw3P!6 zXM1E0x&5g0_lsfSQ){IMh2d{}gzR{6;|IP6$uEYR>J_dg=$$s_1P{*)A?;5f3>+j5 zG%~^0)W3f@Y8OY*$@@U!KXXs=lJ~cXcJM2KXJyHtWH#tMZ|O_ruP=_5Ui#j@G8uRk z3AB$fUQki-byFiDhY2G8>fa)rG*EvLi0gR^Y=lNrNfuo@{FFe zFrye<`0#&3v>4_OgxMmr?r=_& zWOtGA-^wUhgAnH9q} zqzz_mN}nwRk%e|t0jF2ZZa?^51jy*Y(dX=b5Nv^`nFq2d&f7Dos@=F;ZD)_G-!i2J zjB!ruht*N+VU4KEoAc|2HIGMbdfzK5H4v#U#kW2@OLI!)NIFSPZepo%`q&Xy$nU-rC2!_j7t4#sl=IrhvH9(LmU>gE=bnAF z3Kx`LmA32CLI|r4Ms?)}u23cfrewZ}`Z(zOThYlf2`023&t!DFmpz+R2C}MGO^%x* zv5vHBDa{}DfQWA`3I1__bbSfGGy2l)=mnc$BQk-cq z1nZp#1sCE6JgfQt^}+a1b|9h_8hlC? 
z`A2k?VOyhPQiNBAml6}tA~oD2&yL0E@D@V#T@B}XSgnp?ClBAr+_iMETeTMNUaYLP zVYs`)Z9Gl(B6Yf8W1bA>r}s>E>m)hOx|fTReO8zr-Y8hzkFWKPkiSETNcq#ADZf}! zu;IT@8p>~OxPg>M-bbdgI%p6v z1?=Xe-AOs0suwFjI!QvqGCaw~QTRfO z42)fY`$aQHRdBTz@j3Zi?wQ)4SZhkcjaJIQ#+)((hi)iQ;@pUoX=9*sNQmU^;`{dN z4Co)bL`N&Lb$nV&1MNc#^;bKeE*I7D(Rj3a3gpQ_AZMbzgb`5S@526Zc*E{ZsZ(S5 z_#***7Iro~_RpwmV8_;S_@YvRfwa3M#}Y-|m9JbcjSuH6f%C5d3RYOiP%U5SM1vLL z$qgGYy1aqaWEmv}TN%q15zJmPOp4BgX_cp9CnI6wliYV;lge4<{B92A-I1oq5G(8Z z(g+MxW=4|Ql<12{z^eJe6Lz(8Y@`=@D8^`EpWgiiq&VSeW#Pl~CvQMrlbYnU-L|6z z=@)cqZS>iIVC-b&BEF5mBpRU<(NY&o_g#a*4=XG9L4g_%)!703UjAm;rkX0=3LdA zcA$~9%dXm3jg2gJ11X6T+l3ts+5-7;nE8~myueW2_j$lOFxCTG*ISnIJNnS z&em$Si>UhOP{GNl=|ZR>vRH5yl>c1h?@0PSlsg7xytZ&Rx}2PCT~F+v*CUwS6OQt# zUT|GK@f_r4jAi{njbgD?V=i{Pi)8$8-|n0qY9^K8>FN^EA0f^Bhh;W>ap@+s2yak7Ayn*X%sR=>M+Gu@52;q0h z>3CJ2dM3$%!v_#8GfA;VkHob|Bg1H}JG!mMkFHEKg!iUOvCM+C)b*13D_MN-3(b>! zGQWdS8;6(@NOtlw$c&HAj9u+PPBvRQdioBq;ApyQ& zaR$mtNN{fcNG%%!X9XOJ3UhgMRZG;Vpj1xn(~TDz64B>!lRB|9aL!~npRyo>mG885 z_ZJ6sF`m@57bg5svjMQb_E}QxAze#4?@3XhtTxJ&f^As^M_6GmFLi^jzv%gDAGi%m zK8$rm+M&0V2ThGAbX7lXZznf;rMZ&=`K(!XH4YnZqT~6HgOQ~h=#$txC@V$&z&=YF z)>mPA)$e>95V-YI2V9nS=qUJOX42SPej@e_Hi26WBQ(vm@Gl!|`{xHn@cSzqB3@<) zEzT)Zue5lh?y_fe)vn)Ka@CQDy}rRP*o!~uv`O-Phw0jm?ui<0{-HOj!NlnZ^5;=P z2^5jqe{G7#E{Trh#5r18lBfqaLwCk%$CII!e zqmPc;8udP-k&S-A-nIVFb-5AfG=zH{* z?70zvkKjkdqTrNC-!8(PpIIGtX}h`4W_ahCVmP{)Gh{p}UZ8z3VuIJ2R8n#nv~soC z^x5T4>LWQtkBk|P8aCX9)xYWd8+3@*l%<$)zN%GYb{YX~6`Gf9B;J^~#Fh=)mUj$x zav&ilWM)aNn5XPZrrq5}vG7hCf|_7w4X5{={O`=61|v_K1>HeT&d_@yVuj*tZ})Vz zOhVL89YI&O=%yuAF>7SU%%yzJg|gVtNt)P_N5x%}b=X|vT?!@%$>7JI5-n2*9U221 zLfRxO;~CiZWd!p9FgqF+Jf_Ec5Fi?WMS=(-8}E}}A+e@n-m|XW6b$O_X)=j+l%&5} zRao)WS>}_+pwTq^a3KLj$t+gm1>Nyp_2=FZq_Y(?Zfu$0ve*x|9PU;_o}K>~P9n+< z5cVyLx5c1!umiUvqm@@d-#5@xDD_sSTrBK$%}HD*0h94Hgxs8-#_@FG<-OYXmr=z>tLRE!^JRzIwGI1!xRf|@Hb8O5i5D`uyYA-EUNhPl zEJ)bX{xVR{9aDX!qAUE()2C=Dra=lF4Nq;i!$b`9X_X&Kvyaq*{)Wos0%2vX3~q9N zTEepUaRirrr%mbm*zpwhvd|}MnvpP-^;v$=scTOR_M5_9RNtwM1mn}Z)pG09W^*2l 
zvJN>cxNiLa_zr8X_g~*J{aYW6h7C(k8uN4a^te)w{dssxlKPK5&bTqSm8T8J{!n)i zJ;N`t+dMD4Tfrf{*1o5ym%%Rg5*B14yzY7?A$eOtw!pC5|KSI@)A^BQnWv{(cyiM> zl0kz3iANNxdZWd)TE0q$WlM2u{?Agu+|Pt;Xl=KJ1WcjrA~jd8piW;Sqd`Ih9m48@ zK{GX}Uj=iGmYVtXLSL=9-yaQqdMd>!$(rFdQp?@`u5JL(e=Fo4kKa3SvjljITO%g#!t#W~Ow=6MO0Q;)IKViPEwNygWNagTD?I z9F;NlzZEgIr+N*9VBx;vS~@(9HqHqwm%pF2(>X2|RV~92Yft`mRkVe#V{U|4Ih-yR zqT_NOZ)q$l<;Y?G$n$F8!rSCd9-*VOJm!O7gO^Kvv5X{ylhz=WDS2t2Yu6Rs<%f;Y zJ5kgzTj#(W$m!3zA2fA7f~tjRJzO4CZ}d@)yeGS|P8s8W7eEPu(Gs<&w=Z-M4778j zU$JS$b2Wa$;VIE2IjgK(VvU8uw-~}QnPeRsbPG>*Xp6SN32gv5;YmKM5wTZ zJfnNSm)tMqwlex^Q0iyZE0mjdrI>WcEX&$)(tBXM*Aq2Y@I(1gFMv?x(#?bb3Cc!LL-sXEyMxU~)gc&CyA07SF0*USb3!hT>6O6S-lbKS7o73dPj>4z^nU6gz9g2!g46ytDW#VA^$r#>6#NYsPuNG{cle-cVvqhSBb~6t ze=NGn?@2+oGV9a6C-}GWSm=Ys@4Uf!Y6juGL%4I5nM z2j8Nj1RVBDZ|r1b__M8p9I(4jh93hp05PJ3ft_#w@HkAwiX?R?!`mG=qTa?;1TXzh z9&tO5Z_v2|Eg*7dXb-hpgrh%22~n?E`_>Sj`#VFQXM^u9+<(Z&;)|+!$yotzLZ}vQ z313Li717I#ML=E%uc)M&C}RH^3KuB7qQkQAP>Og!$Q4(T39YxA<&@iQrh{x1oA2uo z-4wSCuiPgvjAQZ5?xK!>4R6t_g?a15&e9zI`%vrhq=T@gSy3m>*sv_ zVG2Q7sHxQD#}6Ft1E&IRmqhl#KWqQLF(9Wa`+_`)Bg_)0R^&njhcWZ;Evm9$LpJoT z*kX}Z?5k~Iam!S9z8a?K-<>d?$!-t>Um@gd9z_gt(V-fUJm9EbDI<@I#maePQk;p1 zkvw8|=>K7z%K#V2PHpu?5UYh-?d=3bREwxWX!XMJ4@nUsnB;jp6=~6My(<(npfR~Z8u=EDuE}PG%c7ek(VEOG+~Y?Af3y%^Y~E1>zJ;-A)uKIOS$P#o{Y5(3 zQgE0>MbD-%iQ_Dk-n%@EARfk4NDgLT{j@*|plp8iFW5vyeNL-IF5;05uek8YvC`qL zi;63Ty={ns%C>uNen&8$W3=e!CQNXw3%B;6V^ElroeOO+8$NRDTBF$Isfhb&>6#G3 zP^`S^$FXcEv04s;=SqGmGQKhLbrW@YH9YgGx}B~XGQLo&%Tb;)7`JXQ<5-Uuee`aj zt7M8`IX^TUo>ud0S_P@<1FG?&Sns8Fo8titPbjxXty5^18$bu_z!1?TZB4r~9`o4h ze6@hnA9M)A$=n?K8X#eDALMzIUkI%(HOR>_uco_pY#-q({2U@M5!+);^+h)pe@yc& z$Z3Nd&^>T@&9z_)4S6-E5368#Rd`9P^#1^aVQ+oHljwt`b{>9QRFl`_-v2%+zEPCb zpZ{XvO#Ul971!|njk*fT$K6<=)Fy9o3{};P0&~8&s?sIG z7$VsLF{V&n&Tl^#pK>dqHCdz!`a=P6u52t?0wYhbNRMn8g`2`?q;FivP1ai71?3d$ z3#8*zXZRq03oZ_}vX@z}5EL}4!^7~z5Ja-JZ%u^VIL_>8K*}f@H?+Nz+UhiIrpO{l zXW9qaf`2d}l^Y5SqoKqA65Ad2p;y&OUC{LCKIHi8Zp~s<0{>lGj5Dr+$wH@e&GUoU zeG#x{Jr#-zEg>JMNVv6xyGBYQ#&Qk;0l~-2$X6Ma&CbQRCN~IW 
zu3Yj)B@bJxicjkIPFIb}YG2`XW-l%EoWYA1hBEJDBsCvmzx0kTODqQ<45x6%wKqCf zej|Q+pP^jr2v^S#E8d}G_uMjvAXZ!erznYO>mf?q6M-jn)A`JFNeXmi*ieE|^2+Bx zS~ID7uZg|epgj813o;5pTZ+@PysLY6Le`IZxTLYolWih^r^El((<_tN%!GK!UhbVG z^|m_tKf@k{a;0*dckYN+n!Rb%b%>w8R#_==|J|r-JAU^E7NVSVLx=4@ts`+TwXe=n z;?}m=8h#+f16@b+I@ECdPl@aVWmWo-XP%#&u0#Er)yudrEIe^Z4Y)a;ML{%T{1Y2C zpbwp#ak}9n?q7*KI->1TH!RrHsfiB1g4Ys@a-obAqymZJr1nunD7K5Q^jaX|4D*4V z^EORY#v3uggZwNMQF~M5ADaqd#;1Bo=LG#GKxz{>w+~#vwh;ffEv+#7!pQ2v;ISVt zT|(EdL^SMC#WHwI4ohTVR^g``pjqcw_ zEcIr(MhgkZmJg&LFtY73JKby_&y^o3h$)>9Xv^aMxSc+ka^wCQ--Do1Niz;Pg+ehQ z2nP1Xlo_!&=?SyF*2{q$7EZGQtPr*^uLxfuHNGcc-BDJ%Zl(*pI zhS-?Ck{ewmxUxS?!#oLbLQM%L5_EP;z#lpIIKi6*jU`>k;Nd4=CjLh1#g?X4!74zs z_X-kBV|ly)JtLlWYEtuE89WzcYugEaG(dRZCP}?1fdU1gY~tJ7>W+%fV*1P=#?;?Q z6Fus_*O3+Izlx2!c5Oh12HcKyMg}XoH$|*>U5OloTi7)?`b#t^j8xLbdnrF8R1rYh z8cf$+y2yl`eZrkllt?H44J{}@qRk*J^GQnvhF$2KW~Glk-8LGA60C5xXvDJg9f@W> zT4vneEB9h8W76Nu{{iPAveG-p$Z0kwG7SZm$WSQfJ2QBa-%I;=blxn|skS{VrwQm>s25%Wz4{G~y;Ds113peZTJ z)o+7jv4Y@a{(KiL z*}pA$&Ip@yG1iD$6(G@7ahR?9*`zl$WkxMW7gf$uVIi4Ph3=CM*Eg&P7`eha+nW&8 z?c}XMP0!+mAmjEAr##2nw`_17X()jaHGLHueU`nX8 zU?$SDdlspnU`FF|V{ACdw=e#!83ULla_3h6aA57b!9f=z!y3{TuNC@;1=;ovN#i`4REb24U~x z!AI~Zk+E_7W2440hy9j7;#V~k;!ty$uTNIMqnvb|O2yorG2~aNd0{OiDg5A?npXJ0 zPegrsDa37+DD!DPQF?NKU6}m;L-4kt=D+FT-#j_tj7eOE7Pj9%}s5cACF#xm#2C@ z7u3HzJXB%V8Xd9}?4h(-nIqE={G9g^>`oeQjz=jw! 
zp9Ie^L#l{l%AfJ7TYkpBMjJF3&NtBwydxuLX`8lzg@ zzMKe$`YH$YPY$UwScv(X{e?^%Pe@VitK9T!YR#I&NiG?qMlVI;Wn1wOqlh8dqk6UtAAQd7>?2;wbO^r?P|C(KeSu2&mP3RYo-{Lu{ z^J6;mUn^_lGy6=9CuZO~kTPVG{jj7TQaq9itqX+Ox}xi6^$NY7I}{+*C}+_=)~_vx z?k0lLMM@`&alV~@Rl^Y7)k{lhTm$@PSH$!!MB`7Ktii`;JqU2a3R{_kD8}J?BG4BV7NyiLEInY7Q<|1 zJuKX{#R1Mj*uTy0w{IcLTDgiK?obJRFGt4TDnk_87BpYWBJc?$>xjH&zagNs)`2%T zOHA%dze4wc?8BfxK-VVSUu=}8+JBWxwLTrHS#gvgEY~m|K6C`3^=1W$n{ii{eUY7) zZXv;A;vTrme&_AbHb16_iXS_mf9j)Babfiu=l- zC%U7|nH)x1wHW|0CIyv_&inzid+ zQRTGwPHS={#TAcif{(0>(q=uab^5xhlz(sCw3>Bg2rGDoEH{yOzd={R3cC3ZkkS0g z+%(8a2b;C<9o6e_dto+GHBMwykd&0;pg(p$-;YphTC}zTGH!QL{HO%UopSqZ+})R+ z?_4ib1pgf2n#7vgG5%rvn&|)Xbe{VxofL238)SRrD?-1&xsf3}YUO}rxnmzQ!Ix=H zi#O2z`EyZ{IXYUcgR7?XV>yJWeyul5-M`XIh7doQHl?#yyRTMPfFK2cvR(h#)2Nt|cLi6B|?SE~} zXgR#-Q&a-IgP|*_4Y>cgg~MaMdaJd*&|x|KpkZ2JMkz}3QSI0r!sXpMqs@yps%hs# z9m6Z>GH3xeCXv$lJ^ShCZt3Rh$maA%%vCGd`x646oU03%K)rAt$0-lTM7D#Z?P|mD z)%S4O&h;=feHJ7spMRTK*l>i@3`l3^-E+CCmRd`w6wDSUo+t!DIpKn|O2^+rar8eP zlH5=gL0u`lH`MJNe9P}rGonSm;J4Xh3%Lvz$&W`wP9N2p$Cg(Un6BOlB$bYJ+ ztK7RKKo~}244d`Vrh1SRATRb;iBLjeMCzIT-(oipJ{-4YrLBk3nzc^~#zJEc80A^x zivpeGZcG)>ewCSKJ=0Y2+s9tjA>)(R)z!$?y1`XSq*TJkeQ$-P=*YL9h7TVl(D_v? 
zc@7DOkY}d#@(jSec9v6NP^gU^snp1xx>79rr%?sd4hp78Jg&5yr0Mygo-&Gu>FpchUK39WFR>~w|Zv}==6T{hln<2{UmqT zRKkM0?z8JtNNuGu_`tt~OaI5d-y5S$KRF3(Noqe= z7G*cJw5OzfEr__wS_2l^mgp>#(SDQ)VY5~#?(vepvTe#Wlt+zV+I{kmGLo%14Id)% z^mwn&Tp(Zt>0fB#K2%RVfia-KkRwM#eMIZec~j0sD{(1;cdi2iv6i|Kmm@t)nqjDy zk;DvIlM)23(QQVBVsiNUIbmu8slDuobat6R(kYl5cNvE&um7f$9ylv<<*LQ1Mh{oZ z4uHr2)q18F`h`}}+Ns)dlA0C509InqWHanQE1QjIv|G*v81cm#MHnRf_T|wd0Ao6I z4S0+dHMt0gitv;I|KO$pYEU3bje~Vjv10}i zvIQYNJzeD~3t+(Oa8@S4^LB8aB|HDlTX~i-9cNU#G0+D^vm)?yTJK?x3;h15R2`Rj z$*6P>;R&V=hVAfl>~M7qxNe#ttsx$CyzO@OOwS@Jr>tk%#C{E z_F+yRd5vOCc1xVp#B0KZ6`aY}10aoOZDc zdzV0Gyw>5{?DyE}Yqt&%=LNj4Sa!xT%H|)}12I)A^#rk*1LEKW{TPrsqs}G2owmh) z4<Pigdx_Ps2kP!*1DZ!PJD`d&9Qc2l{Z>M zNyaD(g$duP?-mZ^5q3C!Y$NUgtp-U)(i$l$Dal&%A0#XWZA85x=+d{px78|ict^kT zGV9dT0?)2%mV>d)h7-F1u_rAM=f6-k+3NwHc#*L<63csOvmdvF>df+sAgvHpcu zGEnG8+uWcz{WL~RF9r=M*j`0t+0*z&pfSZ>?#-;c)?w?H1vI-8q~PIL8$X$h7T2>M zqYmW6zgKutDDL!tGO^pP4+S>R@@d!c;l{~sB!yl6V&}(Xu^P>2rl_z^O%EPtVHB_B z#EWnI;@{Ig$pVbG!%ue-SOmAXM%gNoJo`x$_&%RXF1OdXh2oW z*cT9+WvnX3#EY-0lLVNT8Du~AkLdtP6&FYDXF?1-rLM`&+>IqKW;e}7qr@iYk&^f( z?xPHY7STt=**QLxh30Yuw+{@T<32BpOK%>%x|eVEPO(C&w*Q7X($}UFXK6g8K@1-$ z3JB$8^1z<{d)}5DMK52)!1a9uSTOkHD!0Vj=R52C@r5cR%kHWDzSaI&BPvV)1twsf z4-yQ!z@scz%;BVs+Bc%s0*260N9b@=wbty?b9A*6&3S zNbq@8ThntwH+x!x^@u+EEXMPeV_CE~Gqx>+>VWvB1=SKw43u1()Ba^l}w^ zR(_+(l9~&X6fq@Aec96Q6a3!ab;8QZNg2dV15m0@(fu&3yz(V$Pljx<(~dm0YcA_UO(-Iog>jQ?UsOy=%mR`{&15ayMTDzkq)k{1QHZk)U4HI1!df zl3a3XQ+%!QPU-FtkxYOftXeon;j~YzwsyadZYE71(c$TgZptTXXL9Yc^io<2g%Yv} zdL$*%t9QR4W~*(U3|i$xVt0Eq`ddAN@`rO3oQ~@f847MUXU42x2x{;!5b8Ny4>Vm4 z=Z=kef*ZI6oqtJG=rp*@$Ya3~hNuiB8F_Y5jkBls9{1im@mkW1+1G9^XfeB0eGH2B zkAh|SRdCiX5?bmHp(ZgEN^B?~{ARS^lUD{UT9bM-1J0`I%B+$;i9 zW*{3q!Cv=D+}>D`z<3LMNk{*aH6+PBsHi}LE!!NXP3)R*%D`X@Y@+Dr{NB}1ZOeHU zRGk?w>Bkd8iB%oO`h@$hIE3R(H3ddK5admzNELppeHA%yUUp`!Aft(oV-f|BxufcqGMXB6yl!b{c8KXIuec4>8s z+6s^5pBRFWg3FA@^miJAG@}3gaZ_vZFDE(4u8q*YgBai=-zh;0b(Pf z(R@EG(u21UU(6x;v(`3e)e8Mh%{r>36{l)>8x-@9p}WUZZm$i-H`U{ssxUVvQFw=7 
zoy@`U2Ne{###vof-IOWa`vI_xv5U@S_^yQttQ19XGV;P_rtWo$A}YbjH|JVIJL7Cs zT9BxnbekHS^X?+kRK3sKnc_$6_3+D<1j7Zhzz37D^Fnl%z1bE(Rk-*T)Zh>(^<} z?0}C>t~ZoMy$FTa3;YD4b0CHCC{>6ovK{N^Q9I$U`sHt-?Z7FDH=XW6f>-qLXeMCf zd0LBV=ID{C>DwEAxV>=Af5XptH~Q7Ph+=#%cmOysdI@-3Otr; zb_0op98hzE2`QVMek3lbmeKfuESP&6@xg$Z{*F*7QepecMxD+uu@>Y_Fb?fd;#Q`P zU_U!Q@0lM6)M8CktUC8uMG{0)OA0=#geuF{Zl0-t96e{(nr$BE=`{5$XR1<>uA4Ri zMbKQEeYT`ADD7|=fhueYs?1P_iZ67)&dVRy)GCCCz6#rXigvO+!CP7SK24{&srnsw z0*wiW1m$q7)0jA8jbI~!>9?^1BD<@ydi71>nUw0r0%o+`&!WCzRa&i?%LoR%dXi@7L@A$LGeD=tc8tLxzb!w4|S1fbtby)u#0bjw%^ zCYI!0|2{=%_hg;-Pf0}U610uIx4?wR?=mfvelm@0^DVKb*iCk#H55L{zdAeBc`pL3 zDZ7sYwG^#3`<)?bt4otnpF6?B;zU3;6M-J9{mZ3#=s`LpBV@~YqWeS4iC83m2*DpI zE82d6+%&g{JZIAkJ8KG}4Ta@75b-xp>gOhdGpl&`&Z~;(UerPw+w1rGrkQ&im!A&s z%Ae|th|>?xt#m}#uaFeS6v$JPX^CS#Fa+oxJ{*%qW@c|mp54wOuKKob%r1`5fr*Sa z+|vp>ddsQ^+XfT>306??yIKg4^Q)1Oz`_~9qhAwf6_c|Sl9-kRXn+nSq3iHM!e8QN zCOLPml$yHPdwkPdM$}b`+YY|JHA2b+cC!5X1A#*IN~aHU&iAIR&17ByqNCWZ4B$ab z6WJ~?=7($D8lAyfO{uf{`i|CfZpwp61eUrx|_?KN#JHd{&x}%WogNibp>YIwQ=>S=Ia4Js! 
zX&z_p^v?+?S!yOAvk6@e!!P{Ii63=IhsTu89Zchx#HfMrcy~GN%4#!TIm>8{$B>hg zBj9>~2aKr8gMsAtAUed6M=7kjAvGog2gN9%oI6~FA+W+OF(JTxdZs@;EIB>2ZfW!A z&)}uWoBM&|Y)JmrlD)3*^S{OlF9i&nPzZKCh-XKN)Hl2Mranr^YKzQV(;ZrNm6?#3 z?k58C8vG)R*S2Au;*CA$c4b&3&cxTfnQi8i+p5=M59f&Gfy+Hp`+=!83YMkO2DWDF zkp~D%?sb$N@<*uaKmIoRz`Z@W-gYKQ%C&F*e(HfuAUl;Z`#Z+%xM^*DYb!8UHgfav zdTl`Xv@LL@xe7Qv4IS8ToII)gqi3sVlT#ALOxv|YKzMr%h$hY(Y* zSJDE8KG|y^9&hX~I(wm#uv+HJzH5ECPmawD5ICDL=JE9s&h&+yirg`eSnql12z-cL z6^I6BWHO|MMv*DO44Uf9HHi(1a`hAj*qvhs;jD^9n=TX9D{US+s7Z)XF)gU_F)tG6 zx|dooSSq;o@z`H)hvnK)lOP5TPAo_iP_TfT=&t}oy1F_fSoWHXR%w~aOuqado@7mP z1vp25@nh{9g+zuNi=wKJb%mKKFg#{$Kw#nMSWQ5q|J`ktCX&-E>Y|*-Y6=d36=s4j zfkBZ{Z$+Z+{&D!s&1uO%FuRoc1mmd!rse@{IlZia z;KNTcoQ0pAo^Yob)Klr?a&VjyYv}(110tf-S}Sk_9?ad&?yF>aC8{{2;zOS$J=Jsd z1VIiHPHtK*!E4i#UGk5svIE4?HC_HV801R0K1 zL{8(W)k`TV95Yf`4*lurVTpKSh4Q16wG>LEv2HumBYkwBKtKBfu!*?cP7&^|V7Dl= z)wS#QMBp3v#Ny0X4*cA4b1Zj$Aw4kZw4x=ylZ{2rULlf!*welWU3-otZ6yHP@l(>e zQ8|;%sm$upK*mCeFJ5BEEzsKib=i+#{bbbLFOUyK4xki4|F^L%>ER_)-b}1i^C4h+ z%%RS)!%E+l;WwikfyYbUPR=tv8*t`1gK?%ldj7+z#76jSwafe~R?U+&_g=vC0%*_i zG}UZbuSPHhH@dsgA!cK?H~1r>z`H#+#3(Uy&XrcQuz%L-n>j*Di7K`sEAZ`b`;CEvZGuyp=M^B@n)~*JTp^dcd_1%hNtmyy!-sn0Te_S z|J$Yj1WOia*EE~K36GfzjTE_DTNFZ z?kOCV09xPi-8j1mZ%crj2$!iOl{c`s`?%O_0&jiE{F$GG_$}_wiAihXV1?vX1QNHi zenbB|s~j9JX+OgNN&&=wn`e}L6aY1L-vC>gnkk&@eVg}_$jQ@pvJ~I1CK|k`Aq@@L zPS+v~v!cwkuHLBo!bQ>i-Ct^evWTMwPVPY%3vukU;Z$bIcx{prEpsQZcFlY1=aqp@ zHAm0pq}t<$o;d35pAP7FmIuLf$L*ZP0hkyR$^ZD&e?Whv^H5@AV|@jggfIo_P?d)y zBV=)Z4EMmsjo%2&SBM{LJTFB=m+vOJXX7dkbUrN!l9JHKzNJ&rg%-3i{m zfAhL>t*sP(JyQYU30scB(Vm?94b7mC@bJgo|Lqj-cx3!v0PH)U z&1}h_Db*`g^YqKB{`NaMO9D%ERKqu~<5DXZ%#&3B{h6KfWDwxPj5<-Ub%~}F+l08~ z^o;w9ZrHG_Hi1!-yf?0o){_5l(6ErjcwG81KVTMj-j^>ljzpN~R_GK51^*N7gCzfL zr65U38Ov#ot%=0C($5)p9lRVAqAUBcZ;=>4BUv)PYNGG``BmXv4d2jS!Bx|@+Ty;! 
ztte{Q*Jaw6>>jayLdJ1u$Hw0FNdOe$r2lYd1L9Nx90VmPSy9J2=Szo!m##|yMqB$6 zkXATvCINf}q!Eose3ZJ=LMki1r1=##o2TGNL^qnH)A7;xcz--~9 zh&oN0K1r36o$TrSiJpPePePzg^9ATlsaE&Vy^cC8cFj6GCtE$dCMOxUj$$21m{5fQ z((`bl|2QrZG7O+II-c*yj{f8%^px8T_uDgMJI(GJ2GeR`v{ZxlE3Og(3PNiaWWdca zI@j$Rr#ahMteo_g|1f0NN`b|#5C882F|52VPsN4y;~Ce;Yc|Kovt#~BjRXCqLOn&^ zJ3WrM-fIE;DL8Gp|CxBh&B~|{9M0GSi7i$wesh?C249HRwfPv?p!#k&&mV|nZ|MF* zE}(pacdl=wVv#i&u1wEw{NtSWO#C1g=xE1KcdK^RKuM!H41zBB6c{N8uHYrWt1&&Qgz2AH_dxzE}A?0xNP@1568m>P1YNxx-W zzA?J|Y_J9740ig(u@9DLcpuGt4=UX}to4j4k$H1Yc{i&hLmLoQ5a9L%BmUhleLy_! zX1u%RDXu;Ip(NHM$(wOJoDiq;gnepQk@I2N6C)b!lI;uHu&2s9E2s*yh+_P&aTs!h zLF$kD*8qk_`*)qInGGC`Rr+>V3=ep%FJ65P5`W~%b14a4fC?z9bDH-E_uFVAwY!g4 zeX6RLNseFs;&e(O$cq@$2AQG3<=goRH+PW*>O1>&D`FBjUuHxe2$y>PNlO9vIy2p!y>{+6c4{9u9|l*RIU!{M}O5NVW6$4KE!YV?X$WO%>I1=N&Gc=q12qCrGe{VItl;tLr#f zDK6qtAF0AF=_eG#aSwQ?2ap~&X5d8CDH}ePCFaja@$=uBXX9Etndr?^6^p~d`mb}) z3Xsio5i`RH40Q{s&}TJlFbxsQGA|}+el~fj8TAQX|9*SwJbdo75Q4PsA#gOU9 zzuRv=qs1X&=f|L7cP+Rx2-l4#JK+v(9JL3G(b!?wU=j)-F+mh#GwYgRMCY))7w>MX zfq8x^VZxoR(yRw}N$yU%m6Uhle|>esoYtXqRPt%ud=u3x)UN`a{(Mtdo+Ke<-3tEQ zFi0)N5R6C(rmG$KKA3yhP)erT->_waQMuR7mHvvV7W#Wo9hvADwSOTIvhL^;{9Cw; z1*G(Z@gS^fRor+4mnJ1zKk;h)l=SYNq^xF%+k+vI*}?c=YbATs9gP*U z#gk38XFwkRiO;ggNR?{VMIzCRBT3G@vG>cz9Q zoPu|fS>O;tMe^z!SM2{zBl#=SEpe@{8ze_Fi`2Z5c|4Qd#kb|B4Yb#Dv6nAiXsydM zFdnpS%_D>G_8lXn863KA>Tej}Rh;GRM_~?*t$5y23+RUk&K_!4?kN|r;J0M`)z!0|5!9ub z{O(nHho-#-+E4Fr7C@`-OHV-fwFtvhDmCt(e4jjLjLA2HT$g>TllM#C>V*kKlwpQ; z4TJZ?-gt6P=(RO?aYqhCQpxVRW-_5vNUSugQ3;28x6%6%RgLaFFJfiUT|c1gXXXD2 z+x8Fp2|JaZ0IK@)LUM+JhY>vZa~2Q^#;d+^qI{u+48esR;&WYvUolg66WpWNE1jwh zzcA-OiG~5jK@pdJce(fwuJ8gnIoIl_aRJffDwp#-C35ytnB~mJ02+HnSpS-;rTz>* z|5yV)-+r;D)$S;Vc=sBsMR_n683j;YPlJoOt4$pC9s)XlNe>4=vyE?;yTR?09iPAUI z!}`IPLM^4X*tX=K#=cfk@I_pP{`1LGwIZFGSVuRx1`^#_8uL4=nl;KAK$_;5|LS8I zsil3q*)(RqzPdz-#%Hr3OXD=R(a_g7b}NrEd+t#+&4w zU~2Obk(%?)06}o@<6FxcYt=6=QvGgZhN1IfN|bfW%-(;Z9Fb?>F$*DoHxvvy3pNeQDs{w5XzM@Xuw*z|kg!j}ALUwV*)T#=m( 
z@@F;~mDYlNuKiK~oBvS>|2Ao|d<|(`pn=)h*r=gxO6d2sMYG}5OUZI&rbG#daEN(C zrWI1OV=N@N9R?JVr@KBI-*x8}asTARwS157yPc=Y4pHc<70V90(%|A(X4U_Zi9y$8 zZI@_|JfCO%5{}okMCj8$-6_>#~-D|p}#;Xlsb9<>ui(I$^i6k%VRicb;#KQv&-qU$D zJ-qY7J`Jm;t}(K=)@wFpYj8H+B!4^E(S_q+&lAxepQ%Ndz4eZ1^kr9!{>zG%k$aXU zX1!^K?w*{da6_{6D-lhf#X5$}c02D$IyuWfpNNo(URRIMWI*XdP<4h8HZr8Jp>iR& zU!gh-U;!1v`Tklw;dm{r6=npo&R4jpd)tp21%zuRd~A$HZY>^n37=~|WVjM#jmK#? z#)BLX=agG}gEi`19}JTnoCz`Md|g|WOLd)z?3t#FpKOKM4&8Fdfm$P+Vcs=;`w7t< zO@UByU81ZJQ6fTKKG`w|-MI-Vy8-EN;KD-%9dbZ6wIA%66C>uoK3}7)7#|lih6Bz{0Awz)Zfi``^!5yYk52|}FGgIS&9L=jgV?i;O{^QjccbE70BK$Q zD^l}ypLmFh!Tmk(M?X1`zJYyV^jIDwFWn{mlv?K_D0?O2HAFFt8{EtrB3Ug zvJIs@hEFcNn0NbPD@(R82)LXU(gfyiGWX4Ty(xT3ed`ezEAz@M@81otDC4q~O2uy; zM#A!BMUi1;nD!HqUvS1KhQ~W|lyIj+=>E1OGBY8`*341jZfN($;7m`EZEWSEo1V%d ziCI}*hu${GHwEuLZ^dh`=vtLq<0{S;O&0~#Rs$lXZyJmAlpIll6W4h?CDoreUM?o_q72D_1olqL9X{>2;anu(9Gv#F z*5K_4ITFBM(F7tbKc6x?*7qZcY@0-4VGdS~#bH!&;j<^Al@*W?BH+I4y!yw0z&hBn zL;jpur=1|3h~(p=vRn%6JbZEcQTN&6Ww)=EGxKy^&Q@)&RDSGtlF)h`Ri_55GJc*} zl9CnnIpR6gbd0?2j}u#Sv3C1AAC8QVk1Fq_Oi{H`MG>4R?;8)A^o&Y;g;p>BIF>R< zw!6y^;RtBDhFtu+N~4=oo}V%T$_u}VuWx){=YUnq6fuQ=<+Z6Mk!M$;{%>PUZth8H zo^7ozGm}GC*#<6D2-r*+69h5GJA?EbalTLI)Aev1`DnX{6eOSXuMrRaM+Zc>H z>wcZt?SHs41b1XRd-dyfzl6$LMV2?hfpa|iHjnoaZlAeRIBqtA^t|&d1sZV_p8Lte z?Vgg0v9&df|#$SE)uV_1A7d+JoAcCIiV~01p|#@ElJoA6SaO3K!+6Z|=7MJ0!}GMwJfSjdIr_asDt;Y;fUtJO zQ*H=fX`z7gkB1TQYZApEx^D}o{4{E{zi)Ipd|TuBDJ7KpUIg}cnH_J6%lVJ^G6sfT zSrc8RrK3Ci{63(TQ&t#iaEJfn4yArK^NI^vZXDszI=G$HAXJAmt1YJYL_uy99dO{Gw0bJ*pJJg2;iR#$XyyKm@lXniQ~&Del5C9bm# zu|SnqM-mRM#N;0 zO^bfbLE2R2#$N36zV|Yc!u*E3yDWc4do$Q~rT z!W~69NC3B8wjiV=ObB~@r4A+brflJr{QE=7b~Oi?ojZ7>+4>y`8dAqoS@ps#)1+(r zhGg}vk?aBI)s@1xh~l^HhS$?HB{g)cnIu9Y4NN&V4Yk@_Tbry3?P)?RtR7V<|rkU@{+_8>g(zK~DDH&~YqSx&LP`CmX--wY@v@@g}L ze8_TUr1gFXs6WVm6yzM|sX{dsKeeNA+Z2&c>Cq3X0;x*eyCnR`U+F70P2~+#{9~*h z<%Sx9N7)uA;hs)W5N8qI5#24>ZWzf6ym(nLEz72S-$sWZ zZW%_FP`P^gB|8CV5`uqRkqP)C^D3v)RcHQ*x@-5S#r$YSd4wu1{nSK9X?6G#76+JM 
ziY!pvhJGE*HJkO9^iRt|alDzclkQ~@$#mJ13EVFVn$E=9-b~qB;jV!I8#6F==AQ*V z1NHcTjWjr{Wx2LeIA)$JAYaa~d(b^xHe}5l#|V-yZIN@!KXmhCqC`I)lGE-~q*%w$ z>gXtm#z}j~r_-j+gx>fsJ2nvRogaVAB{LugjXHNXB zS=*2bUFQB&(4+4@1z&*@lb3d7xFTx(rryHYP zm(a5_$e?3adWq-X(k4qS@j#ENkhS6B`Kbn<{faaOfVQ(p(nsQ}$|7P zRW;Ni@2}<;d>wl@hAv~%v^DdE2ZriEIPcrgCV_x&`zuFX$J55!n3x)VQSf+p{Co%8 za;V_p;rYo{1xwx|kR-w<8VMH!`AWr2=PiJ+;WXb)EMQ3UV6?`STQ!CuB0=EMvjAPt zBucN)i1OZhQH3V|GohE~zR5CPH}8uss$;yKz-@bc{khD^V!6Z2VhNM=b&vuJ;ULT8 zE{EJn&vtZR(GC0cMM{XCpPi`r=0}!iq$Os`kBCn3PG(x<{wxzYSSECuJRDF+W_#r? zzP*&NA4wmq;n6-m*n*=|q%FV1g0$b97dm;#vOZLRyf*xrCNeHj#Co+?@ZskxpjDL$ zxLGWz)` zjj%;96-N*r=}_h(Xki0gom*N8+nlVkBXgZ;#!6xGSVA)z+w_JLgt^FI5#8Uq7G>wqm;I7$;e@7*R#i41E^5*{WCa$-q*R6!a4l1Y0P( zs+SDKqhV8!Yc6nA_iLs7sD^jMhuEkSchJU96U4{jkZBEoc9ijqJ!7V}9Pe7e%@9Y~ftCZKSy#kwaO-~?_a_EzjT>MBl zrN6lNpIUssN6YH(BwealdP|`tj|g=;jD*pxunTPX0_#!+|Y+0S!`a z!YHB;LhH|tleHC^3z^$yZ6U73h>jk>k+WhXLv_j~UPeWirEb*Z;Vxxx-8sDgkT_G%p(~FREln{ra36s zUMu%OJ3|pe0Bet7Bm(SdVtVmqQNf`Mq3?X6qIz&DD@~eCy&Irvg^zQk_fYY68F<4t zoc+^+tdR)d@BL>6(T1aqe$t>{2n-vCa3)Sm4hhyN1~kyh@IO0-#z*0t6le+C=^{2& zkbt#HVm^_nLiDZh5%u>^2E$n%I`zLc#t&xe`L)>fK;F%t`=Ss|4>SyoSO$&xD%Fx` zzD`ys)pZCXFv%?}NSrm0Ep*iAp!>O+jEouP*`eda1O%nUXFmWV=g+8ut1N1EUgAn?9B<<{xDx&%S?TwMHk8{olt34j+suG+bmnh$&qMn%t?;uKH5#gMP)! 
z=)m@2ox-@c8e{CAf6tRNNnQHybbrtDk#AKP2x)^zJ;QllPcDUT%D311;V>|JF!tNh z%Bo}RHwTzr|NdXtwP|o6*X+T+ud}glm@AF3{`)%SGr@--`jo_;q6&TouanW*lF|>5 z{P{fQb?`sdHVMq-Rz&~24*zo7XC&6+Ki`S@3~M8>RTtMsN-u++aD(l`>Wtuy-u3~* zX$>t``uDpr|HI#j4yTk@0^M~GQ1_4Gjhd^!--j8tcsApIt^q!)6z&MZ^-}KyjR#se z2M?k7zX$lg3}B+c$KVB(=>PRQ|8C>JnK%DR%M~Q&@8j-|mbv@eSotG{??A3{5*Zy` zBuFo@+?r~@zk0O|$W4jYu9f|GCGmKbW5;Z37#TTv;K{)sv|0^l93xah9YtWV5AVwqa@#@N3cF!d@Ooc`ox$l&2Q%CaT|~m@ zN(cjI8L@G#n?f|5nD*x<>atmzfPPdyh^Bu{5Yn~q^__WzQ9o!w_*USNKJ!b^e4@hn zA+Y_nPqv!ukZ(r9?R*f`ZjuRt#*x4@)v3A%I5IlmJz>KNa3lgnvS3YZj%JYL>*dx0)_MSv|cOug$?+(q}^j`6mOR#PpduS*kVz*#wPe`;Yum z`n+7Jmk}4BMJcA?sNB@`2OpR1UL8=FP;=G4C}~Ph7dyeNB#tfyrbFTMJd|upBqDmG zGOX@^ez8@M@GaOycGH1Pmn#Y2W#H@wodL1IPq~@;_2IA?84Ph{{jqh=`w%b`lNaxS zY4P16q+K(C=vsrCd0SD(&;7Sv)F%dHP-ADRgCbsdre@ z+ro=baTb}I0s62FrC9c7s=StRwSw>)UW?BlIeR>{Ke+uhD|(X-&f%HlC2-* z@F*o8-gg>aW8>=X>>g2txP}l?4z!?+lVduNu3ua`UB{{%P5tU+hI8gs@Iqobfa&?G z;x#1ssvPUmj`i1sd-BaQj;~1G)kVg$tHsQKgHaWb*L6xQRDSfPF@tk__<&T7Qew6t zNUMXI-|!WtHwZA9jQaWyYk}E%oRR32re4riDLc_HZB`!4_N3>%SnYJKwt3pEi$H&z zi;Kw@)8^d>;bWQ`CiQt+yqo@h_-GKSOG4(Re$p>&Nqc#ofAQ1mtF{2n9nj9NzsA-2 zD(y|>rrwhBmFyd!x0f_~Jwb1>lWC2KlaiEg@>*JW1}dIqo3S|ZX^z@rZKg!Ig}A{Qr}5JBXq0k zofjj=zFW-lOwGL153s8~C~0;Cb)a3*cEQ7!fZS2ki+e^_OXnE!n$%1BAFpsTdM(^E zA=*UXs)oJo_=3LYbnX15Hy0Mfx6{2>f88EeloZ**?|D^c;}Bi zx=ul+E+3s(i>B&8&Z!WZG7e*+lU&_1hL!e-=MF+_3!DG!r5xR^Yhtd7_qczKlv<9B z0!OpT|07bUAW@>rh{~hQ7aJUp(v$r{GGBAquR08Wd~jD@He;|Qmn9-YH6R9}i=eU1 zEDd_9b_WkApV5Bk(t7Zba`%qa+mQ%6pXxOrWI2Nhxxx8N_VhVlYNTae$b`wflgX8- zlIf5cmRTzBviykmU#FiB@<2RVJi9kJ?laDcHRn#sG>MvAz>1{y!`5%$k}?&X;K<9% z`xTa4Wbf>Sld=Eu0ri%+^ex7jHm4l7-{#{ftD?ki~`jZ48o5w{ks8C@fa5rR5B$ z&&G**=z%yPVCi&M`T~u_uTX(@e=gX^m^_U<5lE^ z_S*C!-Zq*ML)rxmhu6h@Ym4MHqScwXEGT!hxFfwE#{XtftVJ(f%5`yhEhsGu`KHqln%T-!DN zHGRXdX3f_1Cy8NQzl1v8Ukxo@NiVSqcukuVQPL+^iafK@bO2m_2j!07qZN(ImZAYE zyzCErKeok;F|y$cSE${u5o8dCC#^8RZjmrZYUc`W)0q|GGE!B>d1ZM}cVp3b{+-o5 z&wr*QZt6W+P3AF~oq6n0OJ3de2HP3Tc0O;q z_E=?nfbQnG`fh+WUAHs$WSxfD+IK*0XBLsutU1KspI~k@#9N~Kw4b>{!`dw*-@N{4 
z0PC-pw{DhrvprXZ;eBk%Ri!yJTjLlj*U|((%VW?Ws1dmx+RFOqC_6lD)|z);GLWDu zOJA0IypLzOm={}o6666-4=9X2hl zTBT?<*MC%z?{)Nq4~en@Ui>f?4pA|&YM#`62)lOu)CB9?Gx9iBbO3ZjZQyW>OmLos zvpjD~%O2cwt1Hx(pg6o5vY?zo0) zc*MX(pw}2Y-brXebGY1YF59@+yUCGdv$yET9Et{$Kp9CE(_N=vBa$B$C0GB>J9#ch zs{vu%jgE_#V#hm(hgoKG}Hn-e^r3gNgoefj(zSP53CxX0&#lO>U=D=eL1Hj38_c5?@bH`olEq@9p(O6mpM zl=~twUrFLMxx;it)2`f%?}QN|rYVx#n9==6&5=Y~QnU+tl*#vI7YRaw=|2f4YJsyh zTsb4I;{Rp*Qj37zA>irX^9@JPvMofyN={%QI#FwK5+}881W?olA^_W?!=fyKr`rYKZbLF_K ziX1eQz76Ex>sI>K{v%nh~m;G)z*&4QHXM{=%eIku$7y>u>KjU2b!e$OlU%zdb+cNreRN>~B z#gyB8uGg=Y z!z8Hq;WED|byQ!+=+cbonX}kEaaiL`S%ht@29DJbR`FT*L2I_hAE|!cp0Ath_MUuA zmyq&$Hqiva6wS?iwQ4~9DdFWos3(QEv^k0Fmuq5y|AegRlGyiY?hxrB zLPwgb>Sa^GC-?B8*pExn;kNt*T4p8!x*^LpWdb7}_ViFT-;do%WV^Pb;`Vr|4`@Ej z-Gn;%&b>DTw)}4^!=4>VA=ANdJ7D-yw1i=qNsz>|A&+f>th#2`j^D=V9c!4FJjUdZ zIjG0=6xe=0Q-QQ=GZseCpQ!KSxqTVo?mLuD6thb}e_x6-&*|uwisIP^*4^-OtV=9` z$*#u9yih%Vf>m~^7Se07Ogum&-ZfbHoU59Nd9Dv6JJC2>WXGI6v*4`UP?&e0D>@53 z5`uTAVYQs#s?f6+!mfYzv+14^+RB2ZR-f##N|KWCJ;!xq^PMwDzlG)dvy=Pqbhp=? 
zm0qdH_j7BYB{bKH7@P4qlyUuujsb-wA$0v{AA$KvX~QyX5NthnsrN)#*Sf!3*Ku+!$rNqO z77eRtAS$4c6MNEKy7XQ2+N-Z#6yikclLV8%@&<)-$=OrajV%Z-e1nb2UaXzFbNU+oqA1%LD>l$CTg`K;jA(Z zf_W(kM%ND3f=Nn|vpB6cqo$v42u1r{eyIdNeV4j@p%hoSl8m?>JmYnCn{^S5rR082 zRol^AA(!R_dulpN5s$m}WHxHRfQ7KhSiRVg&?s&34>(YolJ@Ks+IpRnXc5wHJ5*lt z?}Vk5ps~KIsPY?ht^HPr1Y0K1kuD@(i_7}?%#e(tw2Ts#slCjPSxs->+Q2yuu`zJ~i@72}H<14E9qUDBxLKhP{&ng@`k9?F)9}N( z^qs>Rl3v-D)c-hbe*hR3d_m9_<1Y?+?J@bk$Tmwg$G<40f|L5c2<87fmh-<{px6>O-p(3X+2pyCDMyWgaGLyOfj57Rh9>XbblnS5QYM*_VgUd| zVHn-uNf~s|04Sq2=zR_xFM5D)Cmr^8Nhw#0Zye+)$?e8r_3$(BpHFCT7d8l zR1kRT$3za;R3c}3B&L;trOX@|58$aRfInd%4XE4ltNtuCOJD&@x-MP?Aw~fURHwxP zNbxdk^py%FWsU(TSf&{=&~rXo0u(1<Fe`?B%1Vd0)5iVF z>*o(7Kzo>T0(624K#NL%rVVTT1OcBI0K<^Iyf~9Ur^@o#nsp^7;ev#PE;qAj*kY~ACY5txvR+FSn@D97ptkLH(=b*g7 zswIHwmRkL?cP{MdnW_k?UjF={K3N0_y_>+V0Q#Zos5>@8b z=b>6Q)aAfHhVo~>D7xO=fCD7z8TTnzJR=AqUme!f_d187(QW0v@gwAI4NxSdAHZnF zk93MnWXm0=nVQb_lT57JVYEi4xtnya`1O)wP=s^Up9mXd*LHU_Y5{FRfj#nqYCQx%m;SZ9^pQaxK9Sqvy;OkIHiPP2+LbNCM(-U#0qjIiv*Ruu=;m<+L?T}(z&=L2o zDPu?Y$w8R)DP4;*6wVk)zhovXrfkx^pyjH)RpFmQZUoMtB-k$x+~ zaLympPEmZ-(o@=iP*fFG?pFqPK-)}bM)*MtTg0?MmMXEp5I{jWAv~~;+T}aD5ZTkK z2WkB*gbAH-+-r1HsxKLShZ1Jwen%I=L#r`)!IXUZ{Q4FlE5;BaOa%rpI@q0(^WX}) z5F+YOlm!xCxO*i-!Myh$zvI;{5KENxsf?NEPLd#<_T43xJNiK(kiheJ`HSu)?O3O@ z>g3GuX~#&)ocL0Ykdq#4uOY9JqTtsI&?5~Z5fuX637(sXO4P!sKUIlH1#HC>(0d8j z)ELIU%1mL5yF6kP8N1BmvrIgp?Icmb66_(crk8)-*g!el6S(cV^G!iq3 z#u7784o)u*z5JJQ)BGcWbFf@XQg)b9!K(2tQ_?&;Ft7u{=L=7J`Zg#qyaSXOK#|)i z@<4dVt6}{_OkoctT6l;;NS5G72h%SrgrWqAVT!QVERp$s_tam#6?kfWe=D`saV*g|4A)0bT>i}R**KM|VWTw@RgkH<>(-}c% zr)&H6vH6>}^_HT8TSC^*i6Uy!3;e#hhv~O&M!zN_BqoHoezDXo(C*%@ladh{EM5L0 z@;x#}*3K4@pb|C>5oQR0>5%$|l0^fi#TpMu73!COt4Vxb8)3-3$0)+_XdTK(^2#KF zrjFiTtt#%SuV>NKvVYF9%^-}K6KoT%Ss*lryp27WP7`1jNySbNGt}njL*@#3CL5X~ zp@TJSk@3roy*sYHJzGz^XmuNTGJab4WTd+G9#3AOyaq~r+sF3O#P}ec5FG~sx4Mk|(vE<@^ zgI(c|G}8Z-#uU%=te)H0DGpTd!O<8=UXCnC@BIRy$dgqin{u-}L|>2>(L7)(Y<($Kvn9wwO6XR%5>*va#x+TX20jteLq^p2te%1t6? 
z+WCq|3|D&qAq~!4&0L<{gnwJ&7AAFX7^F~O4&{=W6j#6cqs63JCa}lU+A@QB!f%0q z$}jC&x8#*qBDKx=SgwmKSyXE%;K9uu)t6^J`3=;8QQg)zdbry<_h`oU=W(o!2rh4aEeqB`t1U;^Vx)rRLxt8;3I@v_xQ>d~^BH0Dzcntgf&->1?Br^Kx2i;8cn!CG|EN@4Xdi?>86KRkZAFJjBvA=VCc`4cQ`OVxeoqBG zVR$~AY@4#C?I}c#{DLpv0pX$$afk$#SifHkc0gbV8k)u7b}WUBs7ae3$+`)-Zk2+M zx-`5XuJPKOYCfrKPB%{o2R02D)7B#-i_SpQFP2<3b8DiP_tQ?kNIg1ifNdfiZf7X% zG<#S", + "Hamdah Shafqat abbasi " + ] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +preadator="0.4.0.dev2" +vaex = "^4.17.0" +scikit-learn = "^1.3.2" +pyod = "^1.1.2" + +[tool.poetry.group.dev.dependencies] +pre-commit = "^3.3.3" +bump2version = "^1.0.1" +pytest = "^7.3.2" +pytest-xdist = "^3.3.1" +pytest-sugar = "^0.9.7" +ipykernel = "^6.28.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/clustering/outlier-removal-tool/run-docker.sh b/clustering/outlier-removal-tool/run-docker.sh new file mode 100644 index 0000000..f2c3472 --- /dev/null +++ b/clustering/outlier-removal-tool/run-docker.sh @@ -0,0 +1,19 @@ +version=$( None: + """Remove outliers from the data.""" + logger.info(f"--inpDir = {inp_dir}") + logger.info(f"--filePattern = {file_pattern}") + logger.info(f"--method = {method}") + logger.info(f"--outputType = {output_type}") + logger.info(f"--outDir = {out_dir}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + + assert inp_dir.exists(), f"{inp_dir} does not exist!! Please check input path again" + assert ( + out_dir.exists() + ), f"{out_dir} does not exist!! 
"""Outlier Removal Plugin."""
import enum
import logging
import os
from pathlib import Path

import numpy as np
import vaex
from pyod.models.iforest import IForest
from sklearn.ensemble import IsolationForest
from sklearn.preprocessing import StandardScaler

logger = logging.getLogger(__name__)
logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO))

# Output format for tabular results; ".arrow" and ".csv" are supported.
POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow")

# Number of rows processed per chunk when streaming CSV data through vaex.
CHUNK_SIZE = 10000


class Methods(str, enum.Enum):
    """Available outlier detection methods."""

    ISOLATIONFOREST = "IsolationForest"
    IFOREST = "IForest"
    # NOTE: same value as ISOLATIONFOREST, so DEFAULT is an *alias* of it
    # (Methods.DEFAULT is Methods.ISOLATIONFOREST).
    DEFAULT = "IsolationForest"


class Outputs(str, enum.Enum):
    """Output Files."""

    INLIER = "inlier"
    OUTLIER = "outlier"
    COMBINED = "combined"
    # NOTE: same value as INLIER, so DEFAULT is an *alias* of INLIER.
    DEFAULT = "inlier"


def write_outputs(data: vaex.DataFrame, outname: Path) -> None:
    """Write outputs in either arrow or csv file formats.

    Args:
        data: vaex dataframe.
        outname: Name of output file.

    Raises:
        ValueError: If POLUS_TAB_EXT is neither ".arrow" nor ".csv".
    """
    if POLUS_TAB_EXT == ".arrow":
        data.export_feather(outname)
    elif POLUS_TAB_EXT == ".csv":
        data.export_csv(outname, chunk_size=CHUNK_SIZE)
    else:
        # BUG FIX: an unsupported extension previously wrote nothing and
        # returned silently, losing the results; fail loudly instead.
        msg = f"Unsupported output extension: {POLUS_TAB_EXT}"
        raise ValueError(msg)
    logger.info(f"Saving outputs: {outname}")


def isolationforest(data_set: np.ndarray, method: Methods) -> np.ndarray:
    """Isolation Forest algorithm.

    Args:
        data_set: Input data.
        method: Type of method to remove outliers.

    Returns:
        ndarray whether or not the data point should be considered as an inlier.

    Raises:
        ValueError: If the method is not a supported one.
    """
    # Methods.DEFAULT is an alias of Methods.ISOLATIONFOREST, so this
    # first branch covers both.
    if method == Methods.ISOLATIONFOREST:
        clf = IsolationForest(random_state=19, n_estimators=200)
    elif method == Methods.IFOREST:
        clf = IForest(random_state=10, n_estimators=200)
    else:
        # BUG FIX: an unknown method previously left `clf` unbound and
        # crashed with a NameError below; raise a clear error instead.
        msg = f"Unknown outlier detection method: {method}"
        raise ValueError(msg)

    clf.fit(data_set)
    return clf.predict(data_set)


def outlier_detection(
    file: Path,
    method: Methods,
    output_type: Outputs,
    out_dir: Path,
) -> None:
    """Detects outliers using Isolation Forest algorithm.

    Args:
        file: Input tabular data.
        method: Select a method to remove outliers.
        output_type: Select type of output file.
        out_dir: Path to output directory.

    Raises:
        ValueError: If the input has no numeric feature columns.
    """
    if Path(file.name).suffix == ".csv":
        data = vaex.from_csv(file, convert=True, chunk_size=CHUNK_SIZE)
    else:
        data = vaex.open(file)

    # Outlier detection operates on numeric (int/float) features only.
    numeric_columns = [
        feature
        for feature in data.get_column_names()
        if data.data_type(feature) in (int, float)
    ]

    if len(numeric_columns) == 0:
        msg = "Features with integer datatype do not exist"
        raise ValueError(msg)

    # Standardize the data so no single feature dominates by scale.
    scaled = StandardScaler().fit_transform(data[numeric_columns])

    # Detect outliers
    logger.info(f"Detecting outliers using {method}")
    data["anomaly"] = isolationforest(scaled, method)

    # sklearn's IsolationForest labels inliers 1 / outliers -1, while
    # pyod's IForest labels inliers 0 / outliers 1.
    if method == Methods.IFOREST:
        inliers = data[data["anomaly"] == 0]
        outliers = data[data["anomaly"] == 1]
    else:  # Methods.ISOLATIONFOREST (and its DEFAULT alias)
        inliers = data[data["anomaly"] == 1]
        outliers = data[data["anomaly"] == -1]

    # Drop the helper 'anomaly' column from the filtered frames; `data`
    # keeps it so the "combined" output preserves the labels.
    inliers = inliers.drop("anomaly")
    outliers = outliers.drop("anomaly")

    outname = Path(out_dir, f"{Path(file.name).stem}_{output_type}{POLUS_TAB_EXT}")

    # BUG FIX: Outputs.DEFAULT aliases Outputs.INLIER, so the original
    # sequence of independent `if`s wrote the inlier file twice; a single
    # elif-chain writes each output exactly once.
    if output_type == Outputs.OUTLIER:
        write_outputs(outliers, outname)
    elif output_type == Outputs.COMBINED:
        write_outputs(data, outname)
    else:  # Outputs.INLIER and its DEFAULT alias
        write_outputs(inliers, outname)
+""" +import tempfile +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest + + +@pytest.fixture( + params=[ + (5000, ".csv", "IsolationForest", "combined"), + (100000, ".arrow", "IForest", "inlier"), + (500000, ".csv", "IsolationForest", "outlier"), + ], +) +def get_params(request: pytest.FixtureRequest) -> tuple[int, str]: + """To get the parameter of the fixture.""" + return request.param + + +@pytest.fixture() +def generate_synthetic_data( + get_params: tuple[int, str, str, str], +) -> tuple[Path, Path, str, str, str]: + """Generate tabular data.""" + nrows, file_extension, method, output_type = get_params + + input_directory = Path(tempfile.mkdtemp(prefix="inputs_")) + output_directory = Path(tempfile.mkdtemp(prefix="out_")) + rng = np.random.default_rng() + tabular_data = { + "sepal_length": rng.random(nrows).tolist(), + "sepal_width": rng.random(nrows).tolist(), + "petal_length": rng.random(nrows).tolist(), + "petal_width": rng.random(nrows).tolist(), + "species": rng.choice( + ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], + nrows, + ).tolist(), + } + + df = pd.DataFrame(tabular_data) + if file_extension == ".csv": + outpath = Path(input_directory, "data.csv") + df.to_csv(outpath, index=False) + if file_extension == ".arrow": + outpath = Path(input_directory, "data.arrow") + df.to_feather(outpath) + + return input_directory, output_directory, file_extension, method, output_type diff --git a/clustering/outlier-removal-tool/tests/test_cli.py b/clustering/outlier-removal-tool/tests/test_cli.py new file mode 100644 index 0000000..c1c24e9 --- /dev/null +++ b/clustering/outlier-removal-tool/tests/test_cli.py @@ -0,0 +1,59 @@ +"""Test Command line Tool.""" +from typer.testing import CliRunner +from polus.tabular.clustering.outlier_removal.__main__ import app +import shutil +from pathlib import Path + + +def test_cli(generate_synthetic_data: tuple[Path, Path, str, str, str]) -> None: + """Test the command line.""" + inp_dir, 
out_dir, file_extension, method, output_type = generate_synthetic_data + file_pattern = f".*{file_extension}" + + runner = CliRunner() + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--filePattern", + file_pattern, + "--method", + method, + "--outputType", + output_type, + "--outDir", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + + +def test_short_cli(generate_synthetic_data: tuple[Path, Path, str, str, str]) -> None: + """Test short command line.""" + inp_dir, out_dir, file_extension, method, output_type = generate_synthetic_data + file_pattern = f".*{file_extension}" + + runner = CliRunner() + result = runner.invoke( + app, + [ + "-i", + inp_dir, + "-f", + file_pattern, + "-m", + method, + "-ot", + output_type, + "-o", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) diff --git a/clustering/outlier-removal-tool/tests/test_outlier_removal.py b/clustering/outlier-removal-tool/tests/test_outlier_removal.py new file mode 100644 index 0000000..68d2867 --- /dev/null +++ b/clustering/outlier-removal-tool/tests/test_outlier_removal.py @@ -0,0 +1,46 @@ +"""Test Outlier Removal Plugin.""" +import shutil +from pathlib import Path + +import filepattern as fp +import numpy as np +import polus.tabular.clustering.outlier_removal.outlier_removal as rm +import vaex + + +def test_outlier_detection( + generate_synthetic_data: tuple[Path, Path, str, str, str], +) -> None: + """Test outlier detection of tabular data.""" + inp_dir, out_dir, file_extension, method, output_type = generate_synthetic_data + + file_pattern = f".*{file_extension}" + files = fp.FilePattern(inp_dir, file_pattern) + for file in files(): + rm.outlier_detection( + file=file[1][0], + method=method, + output_type=output_type, + out_dir=out_dir, + ) + out_ext = [Path(f.name).suffix for f in out_dir.iterdir()] + assert all(out_ext) is True + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) 
+ + +def test_isolationforest( + generate_synthetic_data: tuple[Path, Path, str, str, str], +) -> None: + """Test isolationforest method.""" + inp_dir, out_dir, file_extension, method, output_type = generate_synthetic_data + file_pattern = f".*{file_extension}" + files = fp.FilePattern(inp_dir, file_pattern) + for file in files(): + df = vaex.open(file[1][0]) + data = df[df.column_names[:-1]].values + prediction = rm.isolationforest(data, method) + assert len(prediction) != 0 + assert type(prediction) == np.ndarray + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) diff --git a/clustering/polus-feature-subsetting-plugin/Dockerfile b/clustering/polus-feature-subsetting-plugin/Dockerfile new file mode 100644 index 0000000..babcd23 --- /dev/null +++ b/clustering/polus-feature-subsetting-plugin/Dockerfile @@ -0,0 +1,24 @@ + +FROM polusai/bfio:2.1.9 + +# from bfio container +# ENV POLUS_EXT=".ome.tif" +# ENV POLUS_LOG="INFO" +# ENV EXEC_DIR="/opt/executables" +# ENV DATA_DIR="/data" + +COPY VERSION / + +ARG EXEC_DIR="/opt/executables" +ARG DATA_DIR="/data" + +RUN mkdir -p ${EXEC_DIR} \ + && mkdir -p ${DATA_DIR}/inputs \ + && mkdir ${DATA_DIR}/outputs + +COPY src ${EXEC_DIR}/ +WORKDIR ${EXEC_DIR} + +RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir + +ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/clustering/polus-feature-subsetting-plugin/README.md b/clustering/polus-feature-subsetting-plugin/README.md new file mode 100644 index 0000000..24ccba6 --- /dev/null +++ b/clustering/polus-feature-subsetting-plugin/README.md @@ -0,0 +1,56 @@ +# Feature Data Subset + +This WIPP plugin subsets data based on a given feature. It works in conjunction with the `polus-feature-extraction-plugin`, where the feature extraction plugin can be used to extract the features such as the mean intensity of every image in the input image collection. + +# Usage +The details and usage of the plugin inputs is provided in the section below. 
In addition to the subsetted data, the output directory also consists of a `summary.txt` file which has information as to what images were kept and their new filename if they were renamed.
+
+### Explanation of inputs
+Some of the inputs are pretty straightforward and are used commonly across most WIPP plugins. This section is used to provide some details and examples of the inputs that may be a little complicated. The image collection with the following pattern will be used as an example : `r{r+}_t{t+}_p{p+}_z{z+}_c{c+}.ome.tif`, where r,t,p,z,c stand for replicate, timepoint, position, z-position, and channel respectively. Consider we have 5 replicates, 3 timepoints, 50 positions, 10 z-planes and 4 channels.
+
+1. `inpDir` - This contains the path to the input image collection to subset data from.
+2. `filePattern` - Filepattern of the input images
+3. `groupVar` - This is a mandatory input across which to subset data. This can take either 1 or 2 variables as input and if 2 variables are provided then the second variable will be treated as the minor grouping variable. In our example, if `z` is provided as input, then within a subcollection, the mean of the feature value will be taken for all images with the same z. Then the z positions will be filtered out based on the input of `percentile` and `removeDirection` variables. Now if `z,c` are provided as input, then 'c' will be treated as the minor grouping variable which means that the mean will be taken for all images with the same z for each channel. Also, the plugin will ensure that the same values of z positions are filtered out across c.
+4. `csvDir` - This contains the path to the csv collection containing the feature values for each image. This can be the output of the feature extraction plugin.
+5. `feature` - The column name from the csv file that will be used to filter images
+6. `percentile` and `removeDirection` - These two variables denote the criteria with which images are filtered.
For example, if percentile is `0.1` and removeDirection is set to `Below` then images with feature value below the 10th percentile will be removed. On the other hand, if removeDirection is set to above then all images with feature value greater than the 10th percentile will be removed. This enables data subsetting from both `brightfield` and `darkfield` microscopy images.
+
+ **Optional Arguments**
+
+8. `sectionVar` - This is an optional input to segregate the input image collection into sub-collections. The analysis will be done separately for each sub-collection. In our example, if the user enters `r,t` as the sectionVar, then we will have 15 subcollections (5*3), 1 for each combination of timepoint and replicate. If the user enters `r` as sectionVar, then we will have 5 sub-collections, 1 for each replicate. If the user wants to consider the whole image collection as a single section, then no input is required. NOTE: As a post processing step, same number of images will be subsetted across different sections.
+9. `padding` - This is an optional variable with default value of 0. A padding of 3 means that 3 additional planes will be captured on either side of the subsetted data. This can be used as a sanity check to ensure that the subsetted data captures the images we want. For example, in our example if the following z values were filtered out initially - 5,6,7 ; then a padding of 3 means that the output dataset will have z positions 2,3,4,5,6,7,8,9,10 if all of them exist.
+10. `writeOutput` - This is an optional argument with default value `True`. If it is set to true, then both the output image collection and `summary.txt` file will be created. If it is set to false, then the output directory will only consist of summary.txt. This option enables the user to tune the hyperparameters such as percentile, removeDirection, feature without actually creating the output image collection.
+ +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. + +## Options + +This plugin takes one input argument and one output argument: + +| Name | Description | I/O | Type | +| ------------------- | ----------------------------------------------------- | ------ | ------------- | +| `--csvDir` | CSV collection containing features | Input | csvCollection | +| `--padding` | Number of images to capture outside the cutoff | Input | int | +| `--feature` | Feature to use to subset data | Input | string | +| `--filePattern` | Filename pattern used to separate data | Input | string | +| `--groupVar` | variables to group by in a section | Input | string | +| `--inpDir` | Input image collection to be processed by this plugin | Input | collection | +| `--percentile` | Percentile to remove | Input | int | +| `--removeDirection` | remove direction above or below percentile | Input | string | +| `--sectionVar` | variables to divide larger sections | Input | string | +| `--writeOutput` | write output image collection or not | Input | boolean | +| `--outDir` | Output collection | Output | collection | + diff --git a/clustering/polus-feature-subsetting-plugin/VERSION b/clustering/polus-feature-subsetting-plugin/VERSION new file mode 100644 index 0000000..a34eaa5 --- /dev/null +++ b/clustering/polus-feature-subsetting-plugin/VERSION @@ -0,0 +1 @@ +0.1.11 \ No newline at end of file diff --git a/clustering/polus-feature-subsetting-plugin/build-docker.sh b/clustering/polus-feature-subsetting-plugin/build-docker.sh new file mode 100644 index 0000000..d9ad137 --- /dev/null +++ b/clustering/polus-feature-subsetting-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + 
+version=$(= thresh] + else: + keep_planes = [z for z in planes if feature_dict[z] <= thresh] + + return set(keep_planes) + +def make_uniform(planes_dict, uniques, padding): + """ Ensure each section has the same number of images + + This function makes the output collection uniform in + the sense that it preserves same number of planes across + sections. It also captures additional planes based + on the value of the padding variable + + Args: + planes_dict (dict): planes to keep in different sections + uniques (list): unique values for the major grouping variable + padding (int): additional images to capture outside cutoff + + Returns: + dictionary: dictionary containing planes to keep + """ + + # max no. of planes + max_len = max([len(i) for i in planes_dict.values()]) + + # max planes that can be added on each side + min_ind = min([min(planes_dict[k]) for k in planes_dict]) + max_ind = max([max(planes_dict[k]) for k in planes_dict]) + max_add_left = uniques.index(min_ind) + max_add_right = len(uniques) - (uniques.index(max_ind)+1) + + # add planes in each section based on padding and max number of planes + for section_id, planes in planes_dict.items(): + len_to_add = max_len - len(planes) + len_add_left = min(int(len_to_add)/2+padding, max_add_left) + len_add_right = min(len_to_add - len_add_left+padding, max_add_right) + left_ind = int(uniques.index(min(planes)) - len_add_left) + right_ind = int(uniques.index(max(planes)) + len_add_right)+1 + planes_dict[section_id] = uniques[left_ind:right_ind] + return planes_dict + +def main(inpDir,csvDir,outDir,filePattern,groupVar,percentile, + removeDirection,sectionVar,feature,padding,writeOutput): + """Function containing the main login to subset data + + Args: + inpDir (string): path to input image collection + csvDir (string): path to csv file containing features + outDir (string): path to output collection + filePattern (string): input image filepattern + groupVar (string): grouping variables + percentile (float): 
def main(inpDir, csvDir, outDir, filePattern, groupVar, percentile,
         removeDirection, sectionVar, feature, padding, writeOutput):
    """Function containing the main logic to subset data

    Args:
        inpDir (string): path to input image collection
        csvDir (string): path to csv file containing features
        outDir (string): path to output collection
        filePattern (string): input image filepattern
        groupVar (string): grouping variables
        percentile (float): cutoff feature percentile
        removeDirection (string): subset above or below percentile
        sectionVar (string): sectioning variable
        feature (string): feature to subset using
        padding (int): capture additional images outside of cutoff
        writeOutput (boolean): write output image collection or not
    """

    # Get all file names in csvDir image collection
    csvDir_files = [f.name for f in Path(csvDir).iterdir()
                    if f.is_file() and "".join(f.suffixes) == '.csv']

    # Get all file names in inpDir image collection
    inpDir_files = [f.name for f in Path(inpDir).iterdir()
                    if f.is_file() and "".join(f.suffixes) == '.ome.tif']

    # read and concat all csv files
    for ind, file in enumerate(csvDir_files):
        if ind == 0:
            feature_df = pd.read_csv(os.path.join(csvDir, file), header=0)
        else:
            feature_df = pd.concat([feature_df, pd.read_csv(os.path.join(csvDir, file), header=0)])

    # store image name and its feature value
    feature_dict = {k: v for k, v in zip(feature_df['Image'], feature_df[feature])}

    # separate filepattern variables into different categories
    _, var = filepattern.get_regex(filePattern)
    grouping_variables = groupVar.split(',')
    section_variables = sectionVar.split(',')
    sub_section_variables = [v for v in var if v not in grouping_variables + section_variables]

    # initialize filepattern object
    fp = filepattern.FilePattern(inpDir, pattern=filePattern)
    uniques = fp.uniques

    # first grouping variable is the major one; the optional second one
    # is the minor grouping variable
    [maj_grouping_var, min_grouping_var] = grouping_variables if len(grouping_variables) > 1 else grouping_variables + [None]
    keep_planes = {}

    logger.info('Iterating over sections...')
    # single iteration of this loop gives all images in one section
    for file in fp(group_by=sub_section_variables + grouping_variables):

        section_feat_dict = {}
        section_keep_planes = []
        section_id = tuple([file[0][i] for i in section_variables]) if section_variables[0] else 1

        # iterate over files in one section
        for f in file:
            if min_grouping_var is None:
                f[min_grouping_var] = None

            # store feature values for images, nested by minor then major
            # grouping variable
            if f[min_grouping_var] not in section_feat_dict:
                section_feat_dict[f[min_grouping_var]] = {}

            if f[maj_grouping_var] not in section_feat_dict[f[min_grouping_var]]:
                section_feat_dict[f[min_grouping_var]][f[maj_grouping_var]] = []

            section_feat_dict[f[min_grouping_var]][f[maj_grouping_var]].append(feature_dict[f['file'].name])

        # average feature value by grouping variable
        for key1 in section_feat_dict:
            for key2 in section_feat_dict[key1]:
                section_feat_dict[key1][key2] = sum(section_feat_dict[key1][key2]) / len(section_feat_dict[key1][key2])

            # find planes to keep based on specified criteria
            section_keep_planes.append(filter_planes(section_feat_dict[key1], removeDirection, percentile))

        # keep same planes within a section, across the minor grouping variable
        section_keep_planes = list(section_keep_planes[0].union(*section_keep_planes))
        section_keep_planes = [i for i in range(min(section_keep_planes), max(section_keep_planes) + 1) if i in uniques[maj_grouping_var]]
        keep_planes[section_id] = section_keep_planes

    # keep same number of planes across different sections
    keep_planes = make_uniform(keep_planes, uniques[maj_grouping_var], padding)

    # start writing summary.txt
    summary = open(os.path.join(outDir, 'metadata_files', 'summary.txt'), 'w')

    logger.info('renaming subsetted data')

    # reinitialize filepattern object
    fp = filepattern.FilePattern(inpDir, pattern=filePattern)

    # rename subsetted data
    for file in fp(group_by=sub_section_variables + grouping_variables):
        section_id = tuple([file[0][i] for i in section_variables]) if section_variables[0] else 1
        section_keep_planes = keep_planes[section_id]
        rename_map = {k: v for k, v in zip(keep_planes[section_id], uniques[maj_grouping_var])}

        # update summary.txt with section renaming info
        summary.write('------------------------------------------------ \n')
        if sectionVar.strip():
            summary.write('Section : {} \n'.format({k: file[0][k] for k in section_variables}))
            logger.info('Renaming files from section : {} \n'.format({k: file[0][k] for k in section_variables}))
        summary.write('\nThe following values of "{}" variable have been renamed: \n'.format(maj_grouping_var))
        for k, v in rename_map.items():
            summary.write('{} ---> {} \n'.format(k, v))
        summary.write('\n Files : \n \n')

        # rename and write output
        for f in file:
            if f[maj_grouping_var] not in keep_planes[section_id]:
                continue

            # old and new file name
            old_file_name = f['file'].name
            file_name_dict = {k.upper(): v for k, v in f.items() if k != 'file'}
            file_name_dict[maj_grouping_var.upper()] = rename_map[f[maj_grouping_var]]
            new_file_name = fp.get_matching(**file_name_dict)[0]['file'].name

            # if write output collection
            if writeOutput:
                shutil.copy2(os.path.join(inpDir, old_file_name), os.path.join(outDir, 'images', new_file_name))

            summary.write('{} -----> {} \n'.format(old_file_name, new_file_name))
    summary.close()

if __name__ == "__main__":
    # Initialize the logger
    logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
                        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    ''' Argument parsing '''
    logger.info("Parsing arguments...")
    parser = argparse.ArgumentParser(prog='main', description='Subset data using a given feature')

    # Input arguments
    parser.add_argument('--csvDir', dest='csvDir', type=str,
                        help='CSV collection containing features', required=True)
    parser.add_argument('--padding', dest='padding', type=str,
                        help='Number of images to capture outside the cutoff', required=False)
    parser.add_argument('--feature', dest='feature', type=str,
                        help='Feature to use to subset data', required=True)
    parser.add_argument('--filePattern', dest='filePattern', type=str,
                        help='Filename pattern used to separate data', required=True)
    parser.add_argument('--groupVar', dest='groupVar', type=str,
                        help='variables to group by in a section', required=True)
    parser.add_argument('--inpDir', dest='inpDir', type=str,
                        help='Input image collection to be processed by this plugin', required=True)
    parser.add_argument('--percentile', dest='percentile', type=str,
                        help='Percentile to remove', required=True)
    parser.add_argument('--removeDirection', dest='removeDirection', type=str,
                        help='remove direction above or below percentile', required=True)
    parser.add_argument('--sectionVar', dest='sectionVar', type=str,
                        help='variables to divide larger sections', required=False)
    parser.add_argument('--writeOutput', dest='writeOutput', type=str,
                        help='write output image collection or not', required=False)
    # Output arguments
    parser.add_argument('--outDir', dest='outDir', type=str,
                        help='Output collection', required=True)

    # Parse the arguments
    args = parser.parse_args()
    csvDir = args.csvDir
    logger.info('csvDir = {}'.format(csvDir))
    padding = 0 if args.padding is None else int(args.padding)
    logger.info('padding = {}'.format(padding))
    feature = args.feature
    logger.info('feature = {}'.format(feature))
    filePattern = args.filePattern
    logger.info('filePattern = {}'.format(filePattern))
    groupVar = args.groupVar
    logger.info('groupVar = {}'.format(groupVar))
    inpDir = args.inpDir
    if Path.is_dir(Path(args.inpDir).joinpath('images')):
        # BUG FIX: switch to the images subfolder if present; the original
        # computed this path into an unused variable (fpath) and never
        # updated inpDir, so the switch silently never took effect.
        inpDir = str(Path(args.inpDir).joinpath('images').absolute())
    logger.info('inpDir = {}'.format(inpDir))
    percentile = float(args.percentile)
    logger.info('percentile = {}'.format(percentile))
    removeDirection = args.removeDirection
    logger.info('removeDirection = {}'.format(removeDirection))
    sectionVar = args.sectionVar
    sectionVar = '' if sectionVar is None else sectionVar
    logger.info('sectionVar = {}'.format(sectionVar))
    writeOutput = True if args.writeOutput is None else args.writeOutput == 'true'
    logger.info('writeOutput = {}'.format(writeOutput))
    outDir = args.outDir
    logger.info('outDir = {}'.format(outDir))

    # create metadata and images folder in outDir
    if not os.path.isdir(os.path.join(outDir, 'images')):
        os.mkdir(os.path.join(outDir, 'images'))
    if not os.path.isdir(os.path.join(outDir, 'metadata_files')):
        os.mkdir(os.path.join(outDir, 'metadata_files'))

    # Surround with try/finally for proper error catching
    try:
        main(inpDir=inpDir,
             csvDir=csvDir,
             outDir=outDir,
             filePattern=filePattern,
             groupVar=groupVar,
             percentile=percentile,
             removeDirection=removeDirection,
             sectionVar=sectionVar,
             feature=feature,
             padding=padding,
             writeOutput=writeOutput)

    except Exception:
        traceback.print_exc()

    finally:
        logger.info('exiting workflow..')
        # Exit the program
        sys.exit()
+ +RUN apt --no-install-recommends -y autoremove --purge python3.9-minimal python3.9\ + && apt-get update && apt-get install --no-install-recommends -y build-essential python3.9-dev\ + && pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir + +ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/README.md b/clustering/polus-hdbscan-clustering-plugin/README.md new file mode 100644 index 0000000..2169be0 --- /dev/null +++ b/clustering/polus-hdbscan-clustering-plugin/README.md @@ -0,0 +1,48 @@ +# Hierarchical Density-Based Spatial Clustering of Applications with Noise(HDBSCAN) Clustering + +The HDBSCAN Clustering plugin clusters the data using [HDBSCAN clustering](https://pypi.org/project/hdbscan/) library. The input and output for this plugin is a CSV file. Each observation (row) in the input CSV file is assigned to one of the clusters. The output CSV file contains the column `cluster` that identifies the cluster to which each observation belongs. A user can supply a regular expression with capture groups if they wish to cluster each group independently, or if they wish to average the numerical features across each group and treat them as a single observation. + +## Inputs: + +### Input CSV collection: +The input file(s) that need to be clustered. The file should be in CSV format. This is a required parameter for the plugin. + +### Grouping pattern: +The input for this parameter is a regular expression with capture group. This input splits the data into groups based on the matched pattern. A new column `group` is created in the output CSV file that has the group based on the given pattern. Unless `averageGroups` is set to `true`, providing a grouping pattern will cluster each group independently. + +### Average groups: +Setting this equal to `true` will use the supplied `groupingPattern` to average the numerical features and produce a single row per group which is then clustered. 
The resulting cluster is assigned to all observations belonging in that group. + +### Label column: +This is the name of the column containing the labels to be used with `groupingPattern`. + +### Minimum cluster size: +This parameter defines the smallest number of points that should be considered as cluster. This is a required parameter. The input should be an integer and the value should be greater than 1. + +### Increment outlier ID: +This parameter sets the ID of the outlier cluster to `1`, otherwise it will be 0. This is useful for visualization purposes if the resulting cluster IDs are turned into image annotations. + +## Output: +The output is a CSV file containing the clustered data. + +## Building +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Options + +This plugin takes four input arguments and one output argument: + +| Name | Description | I/O | Type | +| ---------------------- | ---------------------------------------------------------------------------------------------- | ------ | ------------- | +| `--inpDir` | Input csv collection. | Input | csvCollection | +| `--groupingPattern` | Regular expression to group rows. Clustering will be applied across capture groups by default. | Input | string | +| `--averageGroups` | If set to `true`, will average data across groups. Requires capture groups | Input | string | +| `--labelCol` | Name of the column containing labels for grouping pattern. | Input | string | +| `--minClusterSize` | Minimum cluster size. | Input | integer | +| `--incrementOutlierId` | Increments outlier ID to 1. 
| Input | string | +| `--outDir` | Output collection | Output | csvCollection | diff --git a/clustering/polus-hdbscan-clustering-plugin/VERSION b/clustering/polus-hdbscan-clustering-plugin/VERSION new file mode 100644 index 0000000..5546bd2 --- /dev/null +++ b/clustering/polus-hdbscan-clustering-plugin/VERSION @@ -0,0 +1 @@ +0.4.7 \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/build-docker.sh b/clustering/polus-hdbscan-clustering-plugin/build-docker.sh new file mode 100755 index 0000000..7a7f44f --- /dev/null +++ b/clustering/polus-hdbscan-clustering-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$( typing.List[str]: + """List all the .csv files in the directory. + + Args: + csv_directory (str): Path to the directory containing the csv files. + + Returns: + The path to directory, list of names of the subdirectories in dirpath (if any) and the filenames of .csv files. + + """ + list_of_files = [os.path.join(dirpath, file_name) + for dirpath, dirnames, files in os.walk(csv_directory) + for file_name in fnmatch.filter(files, '*.csv')] + return list_of_files + + +def clustering(data: np.ndarray, min_cluster_size: int, increment_outlier_id: bool) -> np.ndarray: + """Cluster data using HDBSCAN. + + Args: + data (array): Data that need to be clustered. + min_cluster_size (int): Smallest size grouping that should be considered as a cluster. + increment_outlier_id (bool) : Increment outlier ID to unity. + + Returns: + Cluster labels for each row of data. 
+ """ + clusters = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size).fit(data) + labels = clusters.labels_.flatten().astype(np.uint16) + 1 + labels = labels + 1 if increment_outlier_id else labels + + return labels + + +# Setup the argument parsing +def main(inpDir, grouping_pattern, avg_groups, label_col, min_cluster_size, increment_outlier_id, outDir): + # Get list of .csv files in the directory including sub folders for clustering + input_csvs = list_files(inpDir) + if input_csvs is None: + raise ValueError('No .csv files found.') + + for csv in input_csvs: + # Get the full path and split to get only the filename. + split_file = os.path.normpath(csv) + file_name = os.path.split(split_file)[-1] + file_prefix, _ = file_name.split('.', 1) + + logger.info('Reading the file ' + file_name) + + # Read csv file + df = pd.read_csv(csv) + + # If user provided a regular expression. + if grouping_pattern is not None: + df = df[df[label_col].str.match(grouping_pattern)].copy() + if df.empty: + logger.warning(f"Could not find any files matching the pattern {grouping_pattern} in file {csv}. Skipping...") + continue + + #Create a column group with matching string + df['group'] = df[label_col].str.extract(grouping_pattern, expand=True).apply(','.join, axis=1) + + # Get column(s) containing data. + df_data = df.select_dtypes(exclude='object').copy() + df_data['group'] = df['group'] + + # If we want to average features for each group. + if avg_groups: + df_grouped = df_data.groupby('group').apply(lambda x: x.sort_values('group').mean(numeric_only=True)) + + # Cluster data using HDBSCAN clustering. + logger.info('Clustering the data') + cluster_ids = clustering(df_grouped.values, min_cluster_size, increment_outlier_id) + + df_grouped['cluster'] = cluster_ids + df = df.merge(df_grouped['cluster'], left_on='group', right_index=True) + else: # We want separate clustering results for each group. 
+ dfs = [] + for group, df_ss in df_data.groupby('group'): + # Cluster data using HDBSCAN clustering. + logger.info(f'Clustering data in group {group}') + + cluster_ids = clustering(df_ss.values, min_cluster_size, increment_outlier_id) + df_ss['cluster'] = cluster_ids + dfs.append(df_ss) + + df_grouped = pd.concat(dfs) + df = df.merge(df_grouped['cluster'], left_index=True, right_index=True) + + # No grouping. Vanilla clustering. + else: + # Get column(s) containing data. + df_data = df.select_dtypes(exclude='object').copy() + + #Cluster data using HDBSCAN clustering + logger.info('Clustering the data') + cluster_ids = clustering(df_data.values, min_cluster_size, increment_outlier_id) + df['cluster'] = cluster_ids + + df.to_csv(os.path.join(outDir, f'{file_prefix}.csv'), index=None, header=True, encoding='utf-8-sig') + logger.info("Finished all processes!") + +if __name__ == "__main__": + logger.info("Parsing arguments...") + parser = argparse.ArgumentParser(prog='main', description='HDBSCAN clustering plugin') + parser.add_argument('--inpDir', dest='inpDir', type=str, + help='Input collection-Data need to be clustered', required=True) + parser.add_argument('--groupingPattern', dest='groupingPattern', type=str, + help='Regular expression to group rows. Clustering will be applied across capture groups.', required=False) + parser.add_argument('--averageGroups', dest='averageGroups', type=str, + help='Whether to average data across groups. Requires capture groups.', default='false', required=False) + parser.add_argument('--labelCol', dest='labelCol', type=str, + help='Name of column containing labels. 
Required only for grouping operations.', required=False) + parser.add_argument('--minClusterSize', dest='minClusterSize', type=int, + help='Minimum cluster size', required=True) + parser.add_argument('--incrementOutlierId', dest='incrementOutlierId', type=str, + help='Increments outlier ID to 1.', default='false', required=False) + parser.add_argument('--outDir', dest='outDir', type=str, + help='Output collection', required=True) + + # Parse the arguments. + args = parser.parse_args() + + # Path to csvfile directory. + inpDir = args.inpDir + logger.info('inpDir = {}'.format(inpDir)) + + # Regular expression for grouping. + grouping_pattern = args.groupingPattern + logger.info('grouping_pattern = {}'.format(grouping_pattern)) + + # Whether to average data for each group. + avg_groups = args.averageGroups.lower() != 'false' + logger.info('avg_groups = {}'.format(avg_groups)) + + # Name of column to use for grouping. + label_col = args.labelCol + logger.info('label_col = {}'.format(label_col)) + + # Minimum cluster size for clustering using HDBSCAN. + min_cluster_size = args.minClusterSize + logger.info('min_cluster_size = {}'.format(min_cluster_size)) + + # Set outlier cluster id as 1. + increment_outlier_id = args.incrementOutlierId.lower() != 'false' + logger.info('increment_outlier_id = {}'.format(increment_outlier_id)) + + # Path to save output csvfiles. 
+ outDir = args.outDir + logger.info('outDir = {}'.format(outDir)) + + main( + inpDir, + grouping_pattern, + avg_groups, + label_col, + min_cluster_size, + increment_outlier_id, + outDir + ) \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt b/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt new file mode 100644 index 0000000..ffd72e0 --- /dev/null +++ b/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt @@ -0,0 +1,2 @@ +hdbscan==0.8.27 +pandas>=1.2.4 diff --git a/features/feature-segmentation-eval-tool/.bumpversion.cfg b/features/feature-segmentation-eval-tool/.bumpversion.cfg new file mode 100644 index 0000000..24647bf --- /dev/null +++ b/features/feature-segmentation-eval-tool/.bumpversion.cfg @@ -0,0 +1,27 @@ +[bumpversion] +current_version = 0.2.6-dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/features/feature_segmentation_eval/__init__.py] diff --git a/features/feature-segmentation-eval-tool/Dockerfile b/features/feature-segmentation-eval-tool/Dockerfile new file mode 100644 index 0000000..73d0b9f --- /dev/null +++ b/features/feature-segmentation-eval-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" +ENV POLUS_LOG="INFO" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +COPY src 
${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.features.feature_segmentation_eval"] +CMD ["--help"] diff --git a/features/feature-segmentation-eval-tool/README.md b/features/feature-segmentation-eval-tool/README.md new file mode 100644 index 0000000..9290a70 --- /dev/null +++ b/features/feature-segmentation-eval-tool/README.md @@ -0,0 +1,28 @@ +# Feature segmentation eval (v0.2.3) + +Plugin to generate evaluation metrics for feature comparison of ground truth and predicted images. Contact [Vishakha Goyal](mailto:vishakha.goyal@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. + +## Options + +This plugin takes six input arguments and one output argument: + +| Name | Description | I/O | Type | +|---------------|-------------------------|--------|--------| +| `--GTDir` | Ground truth feature collection to be processed by this plugin. | Input | genericData | +| `--PredDir` | Predicted feature collection to be processed by this plugin. | Input | genericData | +| `--filePattern` | Filename pattern to filter data. 
| Input | string | +| `--combineLabels`   | Boolean to calculate number of bins for histogram by combining GT and Predicted Labels | Input | boolean | +| `--singleOutFile`   | Boolean to save output file as a single file.| Input | boolean | +| `--outDir` | Output collection | Output | genericData | +| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/features/feature-segmentation-eval-tool/VERSION b/features/feature-segmentation-eval-tool/VERSION new file mode 100644 index 0000000..fccaf8b --- /dev/null +++ b/features/feature-segmentation-eval-tool/VERSION @@ -0,0 +1 @@ +0.2.6-dev0 diff --git a/features/feature-segmentation-eval-tool/build-docker.sh b/features/feature-segmentation-eval-tool/build-docker.sh new file mode 100644 index 0000000..9ba5a2d --- /dev/null +++ b/features/feature-segmentation-eval-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(", + "Hamdah Shafqat Abbasi " + ] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = "^3.9" +filepattern = "^2.0.1" +opencv_python = "^4.5.1.48" +scikit-learn="^1.4.0" +pandas = "^1.2.4" +scipy = "^1.6.2" +typer = "^0.7.0" +blake3 = "^0.3.3" +llvmlite = "^0.39.1" +fastapi = "^0.92.0" +vaex = "^4.7.0" + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/features/feature-segmentation-eval-tool/run-plugin.sh b/features/feature-segmentation-eval-tool/run-plugin.sh new file mode 100644 index 0000000..36e7024 --- /dev/null +++ b/features/feature-segmentation-eval-tool/run-plugin.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +version=$( None: + """Generate evaluation metrics of ground truth and predicted images.""" + logger.info(f"GTDir: {gt_dir}") + logger.info(f"PredDir: {pred_dir}") + logger.info(f"filePattern: 
{file_pattern}") + logger.info(f"combineLabels: {combine_labels}") + logger.info(f"singleOutFile: {single_out_file}") + logger.info(f"outDir: {out_dir}") + + starttime = time.time() + + if not gt_dir.exists(): + msg = "Groundtruth directory does not exist" + raise ValueError(msg, gt_dir) + if not pred_dir.exists(): + msg = "Predicted directory does not exist" + raise ValueError(msg, pred_dir) + if not out_dir.exists(): + msg = "outDir does not exist" + raise ValueError(msg, out_dir) + + if preview: + logger.info(f"generating preview data in {out_dir}") + with Path.open(Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": file_pattern, + "outDir": [], + } + if single_out_file: + out_name = f"result{fs.POLUS_TAB_EXT}" + out_json["outDir"].append(out_name) + + fps = fp.FilePattern(gt_dir, file_pattern) + for file in fps(): + outname = file[1][0].name.split(".")[0] + out_name = f"{outname}{fs.POLUS_TAB_EXT}" + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + + fs.feature_evaluation( + gt_dir, + pred_dir, + combine_labels, + file_pattern, + single_out_file, + out_dir, + ) + + endtime = (time.time() - starttime) / 60 + logger.info(f"Total time taken for execution: {endtime:.4f} minutes") + + +if __name__ == "__main__": + app() diff --git a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py b/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py new file mode 100644 index 0000000..5726dad --- /dev/null +++ b/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py @@ -0,0 +1,468 @@ +"""Feature segmentation evaluation package.""" +import logging +import os +from pathlib import Path +from typing import Any +from typing import Optional +from typing import Union + +import cv2 +import filepattern +import numpy as np +import pandas 
as pd +import scipy.stats +import vaex +from scipy.spatial import distance + +from .metrics import evaluate_all + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") + +EXT = (".arrow", ".feather") +CHUNK_SIZE = 5_000_000 + +HEADER = [ + "Image", + "features", + "histogram intersection", + "correlation", + "chi square", + "bhattacharya distance", + "L1 score", + "L2 score", + "L infinity score", + "cosine distance", + "canberra distance", + "ks divergence", + "match distance", + "cvm distance", + "psi value", + "kl divergence", + "js divergence", + "wasserstein distance", + "Mean square error", + "Root mean square error", + "Normalized Root Mean Squared Error", + "Mean Error", + "Mean Absolute Error", + "Geometric Mean Absolute Error", + "Median Absolute Error", + "Mean Percentage Error", + "Mean Absolute Percentage Error", + "Median Absolute Percentage Error", + "Symmetric Mean Absolute Percentage Error", + "Symmetric Median Absolute Percentage Error", + "Mean Arctangent Absolute Percentage Error", + "Normalized Absolute Error", + "Normalized Absolute Percentage Error", + "Root Mean Squared Percentage Error", + "Root Median Squared Percentage Error", + "Integral Normalized Root Squared Error", + "Root Relative Squared Error", + "Relative Absolute Error (aka Approximation Error)", + "Mean Directional Accuracy", +] + + +def convert_vaex_dataframe(file_path: Path) -> vaex.dataframe.DataFrame: + """The vaex reading of tabular data with (".csv", ".feather", ".arrow") format. + + Args: + file_path: Path to tabular data. + + Returns: + A vaex dataframe. 
+ """ + if file_path.name.endswith(".csv"): + return vaex.read_csv(Path(file_path), convert=True, chunk_size=CHUNK_SIZE) + if file_path.name.endswith(EXT): + return vaex.open(Path(file_path)) + return None + + +def write_outfile(x: vaex.dataframe.DataFrame, out_name: Path) -> None: + """Write an output in vaex supported tabular format.""" + if POLUS_TAB_EXT in [".feather", ".arrow"]: + x.export_feather(out_name) + else: + x.export_csv(path=out_name, chunk_size=CHUNK_SIZE) + + +def comparison( # noqa C901 + expected_array: np.ndarray, + actual_array: np.ndarray, + bin_count: int, +) -> tuple[ + float, + float, + float, + float, + float, + float, + float, + float, + float, + float, + float, + float, + Any, + Any, + float, + float, + Any, +]: + """Calculate the metrics for predicted and ground truth histograms. + + Args: + expected_array: numpy array of original values + actual_array: numpy array of predicted values + bin_count: number of bins provided as an input to calculate histogram. + + Returns: + All metrics + """ + count1, _ = np.histogram(expected_array, bins=bin_count) + pdf1 = count1 / sum(count1) + cdf1 = np.cumsum(pdf1) + + for i in range(0, len(actual_array)): + if actual_array[i] < expected_array.min(): + actual_array[i] = expected_array.min() + if actual_array[i] > expected_array.max(): + actual_array[i] = expected_array.max() + + count2, _ = np.histogram(actual_array, bins=bin_count) + pdf2 = count2 / sum(count2) + cdf2 = np.cumsum(pdf2) + expected_percents = pdf1 + actual_percents = pdf2 + + ### PDF input + def sub_psi(e_perc: Union[float, int], a_perc: Union[float, int]) -> float: + """Compute PSI Value.""" + if a_perc == 0: + a_perc = 0.0001 + if e_perc == 0: + e_perc = 0.0001 + + return (e_perc - a_perc) * np.log(e_perc / a_perc) + + def sub_kld(e_perc: Union[float, int], a_perc: Union[float, int]) -> float: + """Compute KL Divergence.""" + if a_perc == 0: + a_perc = 0.0001 + if e_perc == 0: + e_perc = 0.0001 + + return (e_perc) * np.log(e_perc / 
a_perc) + + def sub_jsd( + expected_percents: Union[float, int], + actual_percents: Union[float, int], + ) -> float: + """Compute JS Divergence.""" + p = np.array(expected_percents) + q = np.array(actual_percents) + m = (p + q) / 2 + # compute Jensen Shannon Divergence + divergence = (scipy.stats.entropy(p, m) + scipy.stats.entropy(q, m)) / 2 + # compute the Jensen Shannon Distance + return np.sqrt(divergence) + + def l1(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute L1 Distance.""" + return np.sum(abs(pdf1 - pdf2)) + + def l2(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute L2 Distance.""" + return np.sqrt(sum((pdf1 - pdf2) ** 2)) + + def linfinity(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute L-infinity Distance.""" + return np.max(abs(pdf1 - pdf2)) + + def hist_intersect(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute Histogram Intersection.""" + pdf1 = pdf1.astype(np.float32) + pdf2 = pdf2.astype(np.float32) + return cv2.compareHist(pdf1, pdf2, cv2.HISTCMP_INTERSECT) + + def cosine_d(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute cosine distance.""" + return distance.cosine(pdf1, pdf2) + + def canberra(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute Canberra distance.""" + return distance.canberra(pdf1, pdf2) + + def correlation(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute Correlation.""" + pdf1 = pdf1.astype(np.float32) + pdf2 = pdf2.astype(np.float32) + return cv2.compareHist(pdf1, pdf2, cv2.HISTCMP_CORREL) + + def chi_square(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute Chi Square.""" + pdf1 = pdf1.astype(np.float32) + pdf2 = pdf2.astype(np.float32) + return cv2.compareHist(pdf1, pdf2, cv2.HISTCMP_CHISQR) + + def bhattacharya(pdf1: np.ndarray, pdf2: np.ndarray) -> float: + """Compute Bhattacharya Distance.""" + pdf1 = pdf1.astype(np.float32) + pdf2 = pdf2.astype(np.float32) + return cv2.compareHist(pdf1, pdf2, cv2.HISTCMP_BHATTACHARYYA) + + ### CDF input + + def 
ks_divergence(cdf1: np.ndarray, cdf2: np.ndarray) -> float: + """Compute KS Divergence.""" + return np.max(abs(cdf1 - cdf2)) + + def match(cdf1: np.ndarray, cdf2: np.ndarray) -> float: + """Compute Match Distance.""" + return np.sum(abs(cdf1 - cdf2)) + + def cvm(cdf1: np.ndarray, cdf2: np.ndarray) -> float: + """Compute CVM Distance.""" + return np.sum((cdf1 - cdf2) ** 2) + + def ws_d(cdf1: np.ndarray, cdf2: np.ndarray) -> float: + """Compute Wasserstein Distance.""" + return scipy.stats.wasserstein_distance(cdf1, cdf2) + + ### metrics that take pdf input + psi_value = np.sum( + sub_psi(expected_percents[i], actual_percents[i]) + for i in range(0, len(expected_percents)) + ) + + kld_value = np.sum( + sub_kld(expected_percents[i], actual_percents[i]) + for i in range(0, len(expected_percents)) + ) + + jsd_value = sub_jsd(expected_percents, actual_percents) + + errors = evaluate_all(expected_percents, actual_percents) + + ### metrics that take cdf input + + wd_value = ws_d(cdf1, cdf2) + + return ( + hist_intersect(pdf1, pdf2), + correlation(pdf1, pdf2), + chi_square(pdf1, pdf2), + bhattacharya(pdf1, pdf2), + l1(pdf1, pdf2), + l2(pdf1, pdf2), + linfinity(pdf1, pdf2), + cosine_d(pdf1, pdf2), + canberra(pdf1, pdf2), + ks_divergence(cdf1, cdf2), + match(cdf1, cdf2), + cvm(cdf1, cdf2), + psi_value, + kld_value, + jsd_value, + wd_value, + errors, + ) + + +def feature_evaluation( # noqa C901 + gt_dir: Path, + pred_dir: Path, + combine_labels: Optional[bool], + file_pattern: str, + single_out_file: Optional[bool], + out_dir: Path, +) -> None: + """Generate evaluation metrics of ground truth and predicted images. + + Args: + gt_dir: Ground truth directory + pred_dir: Predicted directory + combine_labels: Calculate no of bins by combining GT and Predicted Labels + file_pattern: Pattern to parse data + single_out_file: Outputs in single combined or in separate files. + out_dir: Output directory. 
+ """ + fp = filepattern.FilePattern(gt_dir, file_pattern) + + if single_out_file: + lst: list[Any] = [] + + header = [ + "Image", + "features", + "histogram intersection", + "correlation", + "chi square", + "bhattacharya distance", + "L1 score", + "L2 score", + "L infinity score", + "cosine distance", + "canberra distance", + "ks divergence", + "match distance", + "cvm distance", + "psi value", + "kl divergence", + "js divergence", + "wasserstein distance", + "Mean square error", + "Root mean square error", + "Normalized Root Mean Squared Error", + "Mean Error", + "Mean Absolute Error", + "Geometric Mean Absolute Error", + "Median Absolute Error", + "Mean Percentage Error", + "Mean Absolute Percentage Error", + "Median Absolute Percentage Error", + "Symmetric Mean Absolute Percentage Error", + "Symmetric Median Absolute Percentage Error", + "Mean Arctangent Absolute Percentage Error", + "Normalized Absolute Error", + "Normalized Absolute Percentage Error", + "Root Mean Squared Percentage Error", + "Root Median Squared Percentage Error", + "Integral Normalized Root Squared Error", + "Root Relative Squared Error", + "Relative Absolute Error (aka Approximation Error)", + "Mean Directional Accuracy", + ] + for file in fp(): + file_path = file[1][0] + file_name = file[1][0].name + if file[1][0].name.endswith((".csv", ".feather", ".arrow")): + df_gt = convert_vaex_dataframe(file_path) + + pred_fpath = Path(pred_dir, file_name) + if not pred_fpath.exists(): + continue + df_pred = convert_vaex_dataframe(pred_fpath) + + feature_list = [ + feature + for feature in df_gt.get_column_names() + if feature not in ["mask_image", "intensity_image", "label"] + if feature in df_pred.get_column_names() + ] + if not single_out_file: + lst = [] + + for feature in feature_list: + z_gt = df_gt[f"{feature}"].values + z_pred = df_pred[f"{feature}"].values + z_gt = np.array(z_gt, dtype=float) + z_pred = np.array(z_pred, dtype=float) + z_gt = z_gt[~np.isnan(z_gt)] + z_pred = 
z_pred[~np.isnan(z_pred)] + predsize = 0 + if z_pred.size > predsize and z_gt.size > predsize: + logger.info(f"evaluating feature {feature} for {file_name}") + expected_array = z_gt + actual_array = z_pred + if combine_labels: + combined = np.concatenate((actual_array, expected_array)) + q1 = np.quantile(combined, 0.25) + q3 = np.quantile(combined, 0.75) + iqr = q3 - q1 + bin_width = (2 * iqr) / (len(combined) ** (1 / 3)) + if bin_width == float(0.0) or np.isnan(bin_width): + continue + bin_count = np.ceil((combined.max() - combined.min()) / (bin_width)) + else: + q1 = np.quantile(expected_array, 0.25) + q3 = np.quantile(expected_array, 0.75) + iqr = q3 - q1 + bin_width = (2 * iqr) / (len(expected_array) ** (1 / 3)) + if bin_width == float(0.0) or np.isnan(bin_width): + continue + bin_count = np.ceil( + (expected_array.max() - expected_array.min()) / (bin_width), + ) + if bin_count > 2**16 or np.isnan(bin_count) or bin_count == 0: + continue + bin_count = int(bin_count) + + ( + hist_intersect, + correlation, + chi_square, + bhattacharya, + l1, + l2, + linfinity, + cosine_d, + canberra, + ks_divergence, + match, + cvm, + psi_value, + kld_value, + jsd_value, + wd_value, + errors, + ) = comparison(z_gt, z_pred, bin_count) + data_result = [ + file_name, + feature, + hist_intersect, + correlation, + chi_square, + bhattacharya, + l1, + l2, + linfinity, + cosine_d, + canberra, + ks_divergence, + match, + cvm, + psi_value, + kld_value, + jsd_value, + wd_value, + errors.get("mse"), + errors.get("rmse"), + errors.get("nrmse"), + errors.get("me"), + errors.get("mae"), + errors.get("gmae"), + errors.get("mdae"), + errors.get("mpe"), + errors.get("mape"), + errors.get("mdape"), + errors.get("smape"), + errors.get("smdape"), + errors.get("maape"), + errors.get("std_ae"), + errors.get("std_ape"), + errors.get("rmspe"), + errors.get("rmdspe"), + errors.get("inrse"), + errors.get("rrse"), + errors.get("rae"), + errors.get("mda"), + ] + lst.append(data_result) + + if not 
single_out_file: + df = vaex.from_pandas(pd.DataFrame(lst, columns=header)) + outname = file_name.split(".")[0] + POLUS_TAB_EXT + write_outfile(df, Path(out_dir, outname)) + + if single_out_file: + df = vaex.from_pandas(pd.DataFrame(lst, columns=header)) + outname = "result" + POLUS_TAB_EXT + write_outfile(df, Path(out_dir, outname)) + + logger.info("evaluation complete.") diff --git a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py b/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py new file mode 100644 index 0000000..494f52e --- /dev/null +++ b/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py @@ -0,0 +1,247 @@ +"""Feature segmentation evaluation package.""" +## Source: https://gist.github.com/bshishov/5dc237f59f019b26145648e2124ca1c9 + +import logging +from typing import Optional + +import numpy as np + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +EPSILON = 1e-10 + + +def _error(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Simple error.""" + return actual - predicted + + +def _percentage_error(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Percentage error. + + Note: result is NOT multiplied by 100. 
+ """ + return _error(actual, predicted) / (actual + EPSILON) + + +def _geometric_mean( + a: np.ndarray, + axis: Optional[int] = 0, + dtype: Optional[np.dtype] = None, +) -> np.ndarray: + """Geometric mean.""" + if not isinstance(a, np.ndarray): # if not an ndarray object attempt to convert it + log_a = np.log(np.array(a, dtype=dtype)) + elif dtype: # Must change the default dtype allowing array type + if isinstance(a, np.ma.MaskedArray): + log_a = np.log(np.ma.asarray(a, dtype=dtype)) + else: + log_a = np.log(np.asarray(a, dtype=dtype)) + else: + log_a = np.log(a) + return np.exp(log_a.mean(axis=axis)) + + +def mse(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Squared Error.""" + return np.mean(np.square(_error(actual, predicted))) + + +def rmse(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Root Mean Squared Error.""" + return np.sqrt(mse(actual, predicted)) + + +def nrmse(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Normalized Root Mean Squared Error.""" + return rmse(actual, predicted) / (actual.max() - actual.min()) + + +def me(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Error.""" + return np.mean(_error(actual, predicted)) + + +def mae(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Absolute Error.""" + return np.mean(np.abs(_error(actual, predicted))) + + +mad = mae # Mean Absolute Deviation (it is the same as MAE) + + +def gmae(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Geometric Mean Absolute Error.""" + return _geometric_mean(np.abs(_error(actual, predicted))) + + +def mdae(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Median Absolute Error.""" + return np.median(np.abs(_error(actual, predicted))) + + +def mpe(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Percentage Error.""" + return np.mean(_percentage_error(actual, predicted)) + + +def mape(actual: np.ndarray, predicted: np.ndarray) -> 
np.ndarray: + """Mean Absolute Percentage Error. + + Properties: + + Easy to interpret + + Scale independent + - Biased, not symmetric + - Undefined when actual[t] == 0 + Note: result is NOT multiplied by 100. + """ + return np.mean(np.abs(_percentage_error(actual, predicted))) + + +def mdape(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Median Absolute Percentage Error. + + Note: result is NOT multiplied by 100. + """ + return np.median(np.abs(_percentage_error(actual, predicted))) + + +def smape(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Symmetric Mean Absolute Percentage Error. + + Note: result is NOT multiplied by 100. + """ + return np.mean( + 2.0 + * np.abs(actual - predicted) + / ((np.abs(actual) + np.abs(predicted)) + EPSILON), + ) + + +def smdape(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Symmetric Median Absolute Percentage Error. + + Note: result is NOT multiplied by 100. + """ + return np.median( + 2.0 + * np.abs(actual - predicted) + / ((np.abs(actual) + np.abs(predicted)) + EPSILON), + ) + + +def maape(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Arctangent Absolute Percentage Error. + + Note: result is NOT multiplied by 100. + """ + return np.mean(np.arctan(np.abs((actual - predicted) / (actual + EPSILON)))) + + +def std_ae(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Normalized Absolute Error.""" + __mae = mae(actual, predicted) + return np.sqrt( + np.sum(np.square(_error(actual, predicted) - __mae)) / (len(actual) - 1), + ) + + +def std_ape(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Normalized Absolute Percentage Error.""" + __mape = mape(actual, predicted) + return np.sqrt( + np.sum(np.square(_percentage_error(actual, predicted) - __mape)) + / (len(actual) - 1), + ) + + +def rmspe(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Root Mean Squared Percentage Error. + + Note: result is NOT multiplied by 100. 
+ """ + return np.sqrt(np.mean(np.square(_percentage_error(actual, predicted)))) + + +def rmdspe(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Root Median Squared Percentage Error. + + Note: result is NOT multiplied by 100. + """ + return np.sqrt(np.median(np.square(_percentage_error(actual, predicted)))) + + +def inrse(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Integral Normalized Root Squared Error.""" + return np.sqrt( + np.sum(np.square(_error(actual, predicted))) + / np.sum(np.square(actual - np.mean(actual))), + ) + + +def rrse(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Root Relative Squared Error.""" + return np.sqrt( + np.sum(np.square(actual - predicted)) + / np.sum(np.square(actual - np.mean(actual))), + ) + + +def rae(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Relative Absolute Error (aka Approximation Error).""" + return np.sum(np.abs(actual - predicted)) / ( + np.sum(np.abs(actual - np.mean(actual))) + EPSILON + ) + + +def mda(actual: np.ndarray, predicted: np.ndarray) -> np.ndarray: + """Mean Directional Accuracy.""" + return np.mean( + ( + np.sign(actual[1:] - actual[:-1]) == np.sign(predicted[1:] - predicted[:-1]) + ).astype(int), + ) + + +METRICS = { + "mse": mse, + "rmse": rmse, + "nrmse": nrmse, + "me": me, + "mae": mae, + "mad": mad, + "gmae": gmae, + "mdae": mdae, + "mpe": mpe, + "mape": mape, + "mdape": mdape, + "smape": smape, + "smdape": smdape, + "maape": maape, + "std_ae": std_ae, + "std_ape": std_ape, + "rmspe": rmspe, + "rmdspe": rmdspe, + "inrse": inrse, + "rrse": rrse, + "rae": rae, + "mda": mda, +} + + +def evaluate(actual: np.ndarray, predicted: np.ndarray, metrics: dict) -> dict: + """Compute error metrics.""" + results = {} + for name in metrics: + try: + results[name] = METRICS[name](actual, predicted) + except ValueError as err: + results[name] = np.nan + logger.info(f"Unable to compute metric {name}: {err}") + return results + + +def 
evaluate_all(actual: np.ndarray, predicted: np.ndarray) -> dict: + """Compute all metrics.""" + return evaluate(actual, predicted, metrics=set(METRICS.keys())) # type: ignore diff --git a/features/feature-segmentation-eval-tool/tests/__init__.py b/features/feature-segmentation-eval-tool/tests/__init__.py new file mode 100644 index 0000000..51fdd37 --- /dev/null +++ b/features/feature-segmentation-eval-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Feature segmentation evaluation package.""" diff --git a/features/feature-segmentation-eval-tool/tests/conftest.py b/features/feature-segmentation-eval-tool/tests/conftest.py new file mode 100644 index 0000000..ff35427 --- /dev/null +++ b/features/feature-segmentation-eval-tool/tests/conftest.py @@ -0,0 +1,90 @@ +"""Test fixtures. + +Set up all data used in tests. +""" +import tempfile +from pathlib import Path +from typing import Union + +import numpy as np +import pandas as pd +import pytest + + +@pytest.fixture() +def gt_dir() -> Union[str, Path]: + """Create directory for groundtruth features data.""" + return Path(tempfile.mkdtemp(dir=Path.cwd())) + + +@pytest.fixture() +def pred_dir() -> Union[str, Path]: + """Create directory for predicted features data.""" + return Path(tempfile.mkdtemp(dir=Path.cwd())) + + +@pytest.fixture() +def output_directory() -> Union[str, Path]: + """Create output directory.""" + return Path(tempfile.mkdtemp(dir=Path.cwd())) + + +@pytest.fixture( + params=[ + (".csv", 500, True, True), + (".arrow", 100, True, False), + (".csv", 1000, False, True), + (".csv", 10000, True, False), + ], +) +def params(request: pytest.FixtureRequest) -> pytest.FixtureRequest: + """To get the parameter of the fixture.""" + return request.param + + +@pytest.fixture() +def generate_data( + gt_dir: Union[str, Path], + pred_dir: Union[str, Path], + params: pytest.FixtureRequest, +) -> tuple[Union[str, Path], Union[str, Path]]: + """Creating dataset for groundtruth and prediction.""" + file_ext, size, _, _ = params + 
df_size = size + rng = np.random.default_rng(42) + + diction_1 = { + "intensity_image": list(np.repeat("p0_y1_r19_c0.ome.tif", df_size)), + "mask_image": list(np.repeat("p0_y1_r19_c0.ome.tif", df_size)), + "label": list(range(1, df_size + 1)), + "INTEGRATED_INTENSITY": rng.uniform(0.0, 6480.0, size=df_size), + "MEAN": rng.uniform(0.0, 43108.5, size=df_size), + "UNIFORMITY": rng.normal(0.0, 1.0, size=df_size), + "P01": rng.integers(low=1, high=10, size=df_size), + "POLYGONALITY_AVE": list(np.repeat(0, df_size)), + } + df_size = round(size / 1.2) + + diction_2 = { + "intensity_image": list(np.repeat("p0_y1_r01_c0.ome.tif", df_size)), + "mask_image": list(np.repeat("p0_y1_r01_c0.ome.tif", df_size)), + "label": list(range(1, df_size + 1)), + "INTEGRATED_INTENSITY": rng.uniform(0.0, 8000.0, size=df_size), + "MEAN": rng.uniform(0.0, 6000.5, size=df_size), + "UNIFORMITY": rng.normal(0.0, 0.5, size=df_size), + "P01": rng.integers(low=1, high=20, size=df_size), + "POLYGONALITY_AVE": list(np.repeat(0, df_size)), + } + df1 = pd.DataFrame(diction_1) + df2 = pd.DataFrame(diction_2) + if file_ext == ".csv": + for i in range(5): + df1.to_csv(Path(gt_dir, f"p0_y1_r0{i}_c0.csv"), index=False) + df2.to_csv(Path(pred_dir, f"p0_y1_r0{i}_c0.csv"), index=False) + + if file_ext == ".arrow": + for i in range(5): + df1.to_feather(Path(gt_dir, f"p0_y1_r0{i}_c0.arrow")) + df2.to_feather(Path(pred_dir, f"p0_y1_r0{i}_c0.arrow")) + + return gt_dir, pred_dir diff --git a/features/feature-segmentation-eval-tool/tests/test_cli.py b/features/feature-segmentation-eval-tool/tests/test_cli.py new file mode 100644 index 0000000..c92f8ea --- /dev/null +++ b/features/feature-segmentation-eval-tool/tests/test_cli.py @@ -0,0 +1,41 @@ +"""Test Command line Tool.""" +import shutil +from pathlib import Path +from typing import Union + +from polus.images.features.feature_segmentation_eval.__main__ import app +from typer.testing import CliRunner + + +def clean_directories() -> None: + """Remove all temporary 
directories.""" + for d in Path(".").cwd().iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +def test_cli( + generate_data: tuple[Union[Path, str], Union[Path, str]], + output_directory: Union[str, Path], +) -> None: + """Test the command line.""" + runner = CliRunner() + gt_dir, pred_dir = generate_data + result = runner.invoke( + app, + [ + "--GTDir", + gt_dir, + "--PredDir", + pred_dir, + "--filePattern", + ".*.csv", + "--combineLabels", + "--singleOutFile", + "--outDir", + output_directory, + ], + ) + + assert result.exit_code == 0 + clean_directories() diff --git a/features/feature-segmentation-eval-tool/tests/test_feature_single.py b/features/feature-segmentation-eval-tool/tests/test_feature_single.py new file mode 100644 index 0000000..505b8c4 --- /dev/null +++ b/features/feature-segmentation-eval-tool/tests/test_feature_single.py @@ -0,0 +1,40 @@ +"""Test feature segmentation evaluation package.""" +import shutil +from pathlib import Path +from typing import Union + +import polus.images.features.feature_segmentation_eval.feature_evaluation as fs +import pytest +import vaex + + +def clean_directories() -> None: + """Remove all temporary directories.""" + for d in Path(".").cwd().iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +def test_feature_evaluation( + generate_data: tuple[Path, Path], + output_directory: Union[str, Path], + params: pytest.FixtureRequest, +) -> None: + """Test calculating metrics for predicted and ground truth histograms.""" + _, _, combinelabels, single_outfile = params + gt_dir, pred_dir = generate_data + fs.feature_evaluation( + gt_dir=gt_dir, + pred_dir=pred_dir, + combine_labels=combinelabels, + file_pattern=".*", + single_out_file=single_outfile, + out_dir=output_directory, + ) + + for file in list(Path(output_directory).rglob("*")): + df = vaex.open(file) + num_columns = 39 + assert len(df.columns) == num_columns + assert (df.shape[0]) != 0 + clean_directories() diff 
--git a/features/polus-csv-statistics-plugin/Dockerfile b/features/polus-csv-statistics-plugin/Dockerfile new file mode 100644 index 0000000..d6b8f9f --- /dev/null +++ b/features/polus-csv-statistics-plugin/Dockerfile @@ -0,0 +1,17 @@ +FROM polusai/bfio:2.1.9 + +COPY VERSION / + +ARG EXEC_DIR="/opt/executables" +ARG DATA_DIR="/data" + +RUN mkdir -p ${EXEC_DIR} \ + && mkdir -p ${DATA_DIR}/inputs \ + && mkdir ${DATA_DIR}/outputs + +COPY src ${EXEC_DIR}/ +WORKDIR ${EXEC_DIR} + +RUN pip3 install -r ${EXEC_DIR}/requirements.txt + +ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/features/polus-csv-statistics-plugin/README.md b/features/polus-csv-statistics-plugin/README.md new file mode 100644 index 0000000..51ac1c4 --- /dev/null +++ b/features/polus-csv-statistics-plugin/README.md @@ -0,0 +1,37 @@ +# CSV Statistics + +This WIPP plugin performs statistics on values in each column of a csv file if the data is numeric. Rows of data are grouped together by rows that have a matching value in a column with header named `file`. If no columns have the `file` header, then this plugin throws an error. + +Available statistics are: + +1. [mean (arithmetic mean)](https://en.wikipedia.org/wiki/Mean#Arithmetic_mean_(AM)) +2. [median](https://en.wikipedia.org/wiki/Median#The_sample_median) +3. [std (standard deviation)](https://en.wikipedia.org/wiki/Standard_deviation) +4. [var (variance)](https://en.wikipedia.org/wiki/Variance) +5. [skew (Fisher-Pearson skewness)](https://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm) +6. [kurt (excess kurtosis)](https://www.itl.nist.gov/div898/handbook/eda/section3/eda35b.htm) +7. count (number of rows sampled) +8. [iqr (Interquartile_range)](https://en.wikipedia.org/wiki/Interquartile_range) + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). 
+ +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. + +## Options + +This plugin takes two input argument and one output argument: + +| Name | Description | I/O | Type | +| --------------- | --------------------------------------------------- | ------ | ------------- | +| `--statistics` | Types of statistics to calculate | Input | array | +| `--inpDir` | Input csv collection to be processed by this plugin | Input | csvCollection | +| `--filePattern` | The filePattern of the images in represented in csv | Input | string | +| `--groupBy` | The variable(s) of how the images should be grouped | Input | string | +| `--outDir` | Output collection | Output | csvCollection | diff --git a/features/polus-csv-statistics-plugin/VERSION b/features/polus-csv-statistics-plugin/VERSION new file mode 100644 index 0000000..7dff5b8 --- /dev/null +++ b/features/polus-csv-statistics-plugin/VERSION @@ -0,0 +1 @@ +0.2.1 \ No newline at end of file diff --git a/features/polus-csv-statistics-plugin/build-docker.sh b/features/polus-csv-statistics-plugin/build-docker.sh new file mode 100755 index 0000000..ff8f13c --- /dev/null +++ b/features/polus-csv-statistics-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$( fcheck: + fcheck += 1 + logger.info('Unique Files parsed: {}'.format(fnum)) \ No newline at end of file diff --git a/features/polus-csv-statistics-plugin/src/requirements.txt b/features/polus-csv-statistics-plugin/src/requirements.txt new file mode 100644 index 0000000..6dd96c6 --- /dev/null +++ b/features/polus-csv-statistics-plugin/src/requirements.txt @@ -0,0 +1 @@ +filepattern==1.4.7 \ No newline at end of file diff --git a/formats/arrow-to-tabular-tool/.bumpversion.cfg b/formats/arrow-to-tabular-tool/.bumpversion.cfg new file mode 100644 index 
0000000..47c6f72 --- /dev/null +++ b/formats/arrow-to-tabular-tool/.bumpversion.cfg @@ -0,0 +1,27 @@ +[bumpversion] +current_version = 0.2.3-dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/formats/arrow_to_tabular/__init__.py] diff --git a/formats/arrow-to-tabular-tool/.gitignore b/formats/arrow-to-tabular-tool/.gitignore new file mode 100644 index 0000000..c4aa6d8 --- /dev/null +++ b/formats/arrow-to-tabular-tool/.gitignore @@ -0,0 +1,175 @@ + #Byte-compiled / optimized / DLL files + __pycache__/ + *.py[cod] + *$py.class + + # C extensions + *.so + + # Distribution / packaging + .Python + build/ + develop-eggs/ + dist/ + downloads/ + eggs/ + .eggs/ + lib/ + lib64/ + parts/ + sdist/ + var/ + wheels/ + share/python-wheels/ + *.egg-info/ + .installed.cfg + *.egg + MANIFEST + + # PyInstaller + # Usually these files are written by a python script from a template + # before PyInstaller builds the exe, so as to inject date/other infos into it. 
+ *.manifest + *.spec + + # Installer logs + pip-log.txt + pip-delete-this-directory.txt + + # Unit test / coverage reports + htmlcov/ + .tox/ + .nox/ + .coverage + .coverage.* + .cache + nosetests.xml + coverage.xml + *.cover + *.py,cover + .hypothesis/ + .pytest_cache/ + cover/ + + # Translations + *.mo + *.pot + + # Django stuff: + *.log + local_settings.py + db.sqlite3 + db.sqlite3-journal + + # Flask stuff: + instance/ + .webassets-cache + + # Scrapy stuff: + .scrapy + + # Sphinx documentation + docs/_build/ + + # PyBuilder + .pybuilder/ + target/ + + # Jupyter Notebook + .ipynb_checkpoints + + # IPython + profile_default/ + ipython_config.py + + # pyenv + # For a library or package, you might want to ignore these files since the code is + # intended to run in multiple environments; otherwise, check them in: + # .python-version + + # pipenv + # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. + # However, in case of collaboration, if having platform-specific dependencies or dependencies + # having no cross-platform support, pipenv may install dependencies that don't work, or not + # install all needed dependencies. + #Pipfile.lock + + # poetry + # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. + # This is especially recommended for binary packages to ensure reproducibility, and is more + # commonly ignored for libraries. + # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control + poetry.lock + ../../poetry.lock + + # pdm + # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. + #pdm.lock + # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it + # in version control. + # https://pdm.fming.dev/#use-with-ide + .pdm.toml + + # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm + __pypackages__/ + + # Celery stuff + celerybeat-schedule + celerybeat.pid + + # SageMath parsed files + *.sage.py + + # Environments + .env + .venv + env/ + venv/ + ENV/ + env.bak/ + venv.bak/ + + # Spyder project settings + .spyderproject + .spyproject + + # Rope project settings + .ropeproject + + # mkdocs documentation + /site + + # mypy + .mypy_cache/ + .dmypy.json + dmypy.json + + # Pyre type checker + .pyre/ + + # pytype static type analyzer + .pytype/ + + # Cython debug symbols + cython_debug/ + + # PyCharm + # JetBrains specific template is maintained in a separate JetBrains.gitignore that can + # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore + # and can be added to the global gitignore or merged into this file. For a more nuclear + # option (not recommended) you can uncomment the following to ignore the entire idea folder. + #.idea/ + + # vscode + .vscode + + # test data directory + data + # yaml file + .pre-commit-config.yaml + + # hidden files + .DS_Store + .ds_store + # flake8 + .flake8 diff --git a/formats/arrow-to-tabular-tool/Dockerfile b/formats/arrow-to-tabular-tool/Dockerfile new file mode 100644 index 0000000..210b38a --- /dev/null +++ b/formats/arrow-to-tabular-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.formats.arrow_to_tabular"] +CMD ["--help"] diff --git a/formats/arrow-to-tabular-tool/README.md b/formats/arrow-to-tabular-tool/README.md 
new file mode 100644 index 0000000..5b9d36e --- /dev/null +++ b/formats/arrow-to-tabular-tool/README.md @@ -0,0 +1,29 @@ +# Arrow to Tabular (v0.2.0) +This WIPP plugin allows analysts to convert Arrow Feather File Format (V2) into the following file formats for researchers: \ + - `.parquet` \ + - `.csv` + +Contact [Kelechi Nina Mezu](mailto:nina.mezu@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`bash build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the +contents of `plugin.json` into the pop-up window and submit. + +## Options + +This plugin takes two input arguments and one output argument: + +| Name | Description | I/O | Type | +| --------------- | ------------------------------------------------------------ | ------ | ---------- | +| `--inpDir` | Input generic data collection to be processed by this plugin | Input | collection | +| `--fileFormat` | Filename pattern to convert | Input | string | +| `--outDir` | Output collection | Output | collection | +| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/formats/arrow-to-tabular-tool/VERSION b/formats/arrow-to-tabular-tool/VERSION new file mode 100644 index 0000000..3988334 --- /dev/null +++ b/formats/arrow-to-tabular-tool/VERSION @@ -0,0 +1 @@ +0.2.3-dev0 diff --git a/formats/arrow-to-tabular-tool/arrowtotabular.cwl b/formats/arrow-to-tabular-tool/arrowtotabular.cwl new file mode 100644 index 0000000..df3754c --- /dev/null +++ b/formats/arrow-to-tabular-tool/arrowtotabular.cwl @@ -0,0 +1,28 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + fileFormat: + inputBinding: + prefix: --fileFormat + type: string + inpDir: + inputBinding: + prefix: --inpDir + type: Directory + 
outDir: + inputBinding: + prefix: --outDir + type: Directory +outputs: + outDir: + outputBinding: + glob: $(inputs.outDir.basename) + type: Directory +requirements: + DockerRequirement: + dockerPull: polusai/arrow-to-tabular-tool:0.2.3-dev0 + InitialWorkDirRequirement: + listing: + - entry: $(inputs.outDir) + writable: true + InlineJavascriptRequirement: {} diff --git a/formats/arrow-to-tabular-tool/build-docker.sh b/formats/arrow-to-tabular-tool/build-docker.sh new file mode 100755 index 0000000..b1ddde0 --- /dev/null +++ b/formats/arrow-to-tabular-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(","Hamdah Shafqat abbasi "] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +blake3 = "^0.3.3" +fcsparser = "^0.2.4" +llvmlite = "^0.39.1" +fastapi = "^0.92.0" +vaex = "^4.7.0" + + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" +pandas = "^1.5.3" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/formats/arrow-to-tabular-tool/run-plugin.sh b/formats/arrow-to-tabular-tool/run-plugin.sh new file mode 100755 index 0000000..22f347e --- /dev/null +++ b/formats/arrow-to-tabular-tool/run-plugin.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +#!/bin/bash +version=$( None: + """Execute Main function.""" + logger.info(f"inpDir = {inp_dir}") + logger.info(f"outDir = {out_dir}") + logger.info(f"fileFormat = {file_format}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + + assert inp_dir.exists(), f"{inp_dir} doesnot exists!! Please check input path again" + assert ( + out_dir.exists() + ), f"{out_dir} doesnot exists!! 
Please check output path again" + FILE_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") + + if file_format == Format.Default: + file_format = FILE_EXT + elif file_format == Format.CSV: + file_format = ".csv" + elif file_format == Format.PARQUET: + file_format = ".parquet" + elif file_format == None: + file_format = FILE_EXT + + assert file_format in [ + ".csv", + ".parquet", + ], f"This tabular file format: {file_format} is not support supported by this plugin!! Choose either CSV or Parquet FileFormat" + + pattern_list = [".feather", ".arrow"] + pattern = [f.suffix for f in inp_dir.iterdir() if f.suffix in pattern_list][0] + assert ( + pattern in pattern_list + ), f"This input file extension {pattern} is not support supported by this plugin!! It should be either .feather and .arrow files" + filepattern = {".feather": ".*.feather", ".arrow": ".*.arrow"} + + featherPattern = filepattern[pattern] + + fps = fp.FilePattern(inp_dir, featherPattern) + + if preview: + with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": featherPattern, + "outDir": [], + } + for file in fps(): + out_name = str(file[1][0].stem) + file_format + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + + with ProcessPoolExecutor(max_workers) as executor: + processes = [] + for files in fps: + file = files[1][0] + processes.append(executor.submit(arrow_tabular, file, file_format, out_dir)) + + for process in tqdm( + as_completed(processes), desc="Arrow --> Tabular", total=len(processes) + ): + process.result() + + logger.info("Finished all processes!") + + +if __name__ == "__main__": + typer.run(main) diff --git a/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py b/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py new file mode 100644 index 0000000..719f324 --- /dev/null +++ 
b/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py @@ -0,0 +1,53 @@ +"""Arrow to Tabular.""" +import logging +import pathlib + +from enum import Enum +import vaex + +logger = logging.getLogger(__name__) + + + +class Format(str, Enum): + """Extension types to be converted.""" + CSV = ".csv" + PARQUET = ".parquet" + Default = "default" + + +def arrow_tabular(file: pathlib.Path, file_format: str, out_dir: pathlib.Path) -> None: + """Convert Arrow file into tabular file. + This plugin uses vaex to open an arrow file and converts into csv or parquet tabular data. + + Args: + file : Path to input file. + file_format : Filepattern of desired tabular output file. + out_dir: Path to output directory. + """ + file_name = pathlib.Path(file).stem + logger.info("Arrow Conversion: Copy ${file_name} into outDir for processing...") + + output_file = pathlib.Path(out_dir, (file_name + file_format)) + + logger.info("Arrow Conversion: Converting file into PyArrow Table") + + data = vaex.open(file) + logger.info("Arrow Conversion: table converted") + ncols = len(data) + chunk_size = max([2**24 // ncols, 1]) + + logger.info("Arrow Conversion: checking for file format") + + if file_format == ".csv": + logger.info("Arrow Conversion: Converting PyArrow Table into .csv file") + # Streaming contents of Arrow Table into csv + return data.export_csv(output_file, chunksize=chunk_size) + + elif file_format == ".parquet": + logger.info("Arrow Conversion: Converting PyArrow Table into .parquet file") + return data.export_parquet(output_file) + else: + logger.error( + "Arrow Conversion Error: This format is not supported in this plugin" + ) diff --git a/formats/arrow-to-tabular-tool/tests/__init__.py b/formats/arrow-to-tabular-tool/tests/__init__.py new file mode 100644 index 0000000..d7bcf67 --- /dev/null +++ b/formats/arrow-to-tabular-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Arrow to Tabular.""" diff --git 
a/formats/arrow-to-tabular-tool/tests/test_main.py b/formats/arrow-to-tabular-tool/tests/test_main.py new file mode 100644 index 0000000..9dd2147 --- /dev/null +++ b/formats/arrow-to-tabular-tool/tests/test_main.py @@ -0,0 +1,69 @@ +"""Testing of Arrow to Tabular plugin.""" +import os +import pathlib +import random +import string + +import filepattern as fp +import numpy as np +import pandas as pd +import pytest +from polus.images.formats.arrow_to_tabular.arrow_to_tabular import arrow_tabular + + +@pytest.fixture() +def generate_arrow(): + """Create pandas dataframe and convert into to arrow file format.""" + dirpath = os.path.abspath(os.path.join(__file__, "../..")) + inpDir = pathlib.Path(dirpath, "data/input") + outDir = pathlib.Path(dirpath, "data/output") + if not inpDir.exists(): + inpDir.mkdir(parents=True, exist_ok=True) + if not outDir.exists(): + outDir.mkdir(exist_ok=True, parents=True) + + df = pd.DataFrame( + { + "A": [random.choice(string.ascii_letters) for i in range(100)], + "B": np.random.randint(low=1, high=100, size=100), + "C": np.random.normal(0.0, 1.0, size=100), + }, + ) + df.to_feather(pathlib.Path(inpDir, "data.arrow")) + df.to_feather(pathlib.Path(inpDir, "data1.arrow")) + + return inpDir, outDir + + +def test_arrow_tabular(generate_arrow): + """Test of Arrow to Parquet file format.""" + pattern = ".parquet" + filePattern = {".csv": ".*.csv", ".parquet": ".*.parquet"} + out_pattern = filePattern[pattern] + in_pattern = ".*.arrow" + fps = fp.FilePattern(generate_arrow[0], in_pattern) + for file in fps(): + arrow_tabular(file[1][0], pattern, generate_arrow[1]) + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(generate_arrow[1], out_pattern)() + ) + is True + ) + [os.remove(f) for f in generate_arrow[1].iterdir() if f.name.endswith(pattern)] + + pattern = ".csv" + out_pattern = filePattern[pattern] + fps = fp.FilePattern(generate_arrow[0], in_pattern) + for file in fps(): + arrow_tabular(file[1][0], pattern, 
generate_arrow[1]) + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(generate_arrow[1], out_pattern)() + ) + is True + ) diff --git a/formats/polus-fcs-to-csv-converter-plugin/Dockerfile b/formats/polus-fcs-to-csv-converter-plugin/Dockerfile new file mode 100644 index 0000000..78be1a4 --- /dev/null +++ b/formats/polus-fcs-to-csv-converter-plugin/Dockerfile @@ -0,0 +1,8 @@ +FROM polusai/bfio:2.1.9 + +COPY VERSION ${EXEC_DIR} +COPY src ${EXEC_DIR}/ + +RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir + +ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/formats/polus-fcs-to-csv-converter-plugin/README.md b/formats/polus-fcs-to-csv-converter-plugin/README.md new file mode 100644 index 0000000..fd4dc62 --- /dev/null +++ b/formats/polus-fcs-to-csv-converter-plugin/README.md @@ -0,0 +1,31 @@ +# Fcs to Csv file converter + +The fcs to csv file converter plugin converts fcs file to csv file.The input file should be in .fcs file format and output will be .csv file format. + +## Input: +The input should be a file in fcs format. + +## Output: +The output is a csv file. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes eight input argument and one output argument: + +| Name | Description | I/O | Type | +| ---------- | ------------------------- | ------ | ------------- | +| `--inpDir` | Input fcs file collection | Input | collection | +| `--outDir` | Output collection | Output | csvCollection | + + diff --git a/formats/polus-fcs-to-csv-converter-plugin/VERSION b/formats/polus-fcs-to-csv-converter-plugin/VERSION new file mode 100644 index 0000000..28af839 --- /dev/null +++ b/formats/polus-fcs-to-csv-converter-plugin/VERSION @@ -0,0 +1 @@ +0.2.5 \ No newline at end of file diff --git a/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh b/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh new file mode 100644 index 0000000..9a33106 --- /dev/null +++ b/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/formats/tabular_converter/__init__.py] diff --git a/formats/tabular-converter-tool/.gitignore b/formats/tabular-converter-tool/.gitignore new file mode 100644 index 0000000..e891280 --- /dev/null +++ b/formats/tabular-converter-tool/.gitignore @@ -0,0 +1,175 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a 
template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +poetry.lock +../../poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# vscode +.vscode + +# test data directory +data +# yaml file +.pre-commit-config.yaml + +# hidden files +.DS_Store +.ds_store +# flake8 +.flake8 diff --git a/formats/tabular-converter-tool/Dockerfile b/formats/tabular-converter-tool/Dockerfile new file mode 100644 index 0000000..3c3fd17 --- /dev/null +++ b/formats/tabular-converter-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.formats.tabular_converter"] +CMD ["--help"] diff --git a/formats/tabular-converter-tool/README.md b/formats/tabular-converter-tool/README.md new file mode 100644 index 0000000..8f650d4 
--- /dev/null +++ b/formats/tabular-converter-tool/README.md @@ -0,0 +1,41 @@ +# Tabular Converter (v0.1.0) + +This WIPP plugin allows the tabular data conversion to `arrow` file format and vice versa. Currently this plugins handles only the vaex supported file formats. +This plugin supports the following file formats which are convertable into `arrow` file format: + +1. fcs +2. csv +3. hdf5 +4. fits +5. parquet +6. feather + +However the `arrow` file format is convertable to all other file formats except `fcs` and `fits`. +The support for additional file formats will be added in future. + + +Contact [Kelechi Nina Mezu](mailto:nina.mezu@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`bash build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the +contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes two input arguments and one output argument: + +| Name | Description | I/O | Type | +|---------------|-------------------------|--------|--------| +| `--inpDir` | Input generic data collection to be processed by this plugin | Input | genericData | +| `--filePattern` | Pattern to parse tabular files | Input | string | +| `--fileExtension` | Desired pattern to convert | Input | string | +| `--outDir` | Output collection | Output | genericData | +| `--preview` | Generate JSON file with outputs | Output | JSON | diff --git a/formats/tabular-converter-tool/VERSION b/formats/tabular-converter-tool/VERSION new file mode 100644 index 0000000..9d8d2c1 --- /dev/null +++ b/formats/tabular-converter-tool/VERSION @@ -0,0 +1 @@ +0.1.2-dev0 diff --git a/formats/tabular-converter-tool/build-docker.sh b/formats/tabular-converter-tool/build-docker.sh new file mode 100644 index 0000000..fed7c5a --- /dev/null +++ b/formats/tabular-converter-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(", +"hamshkhawar " +] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +pyarrow = "^11.0.0" +blake3 = "^0.3.3" +fcsparser = "^0.2.4" +llvmlite = "^0.39.1" +fastapi = "^0.92.0" +astropy = "5.2.1" +vaex = "^4.7.0" + + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/formats/tabular-converter-tool/run-plugin.sh b/formats/tabular-converter-tool/run-plugin.sh new file mode 100644 index 0000000..00d2e44 --- /dev/null +++ b/formats/tabular-converter-tool/run-plugin.sh @@ -0,0 +1,26 @@ +#!/bin/bash +version=$( None: + """Execute Main function.""" + logger.info(f"inpDir = {inp_dir}") + logger.info(f"outDir = {out_dir}") 
+ logger.info(f"filePattern = {file_pattern}") + logger.info(f"fileExtension = {file_extension}") + + assert inp_dir.exists(), f"{inp_dir} doesnot exist!! Please check input path again" + assert out_dir.exists(), f"{out_dir} doesnot exist!! Please check output path again" + + file_pattern = ".*" + file_pattern + + fps = fp.FilePattern(inp_dir, file_pattern) + + if preview: + with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": file_pattern, + "outDir": [], + } + for file in fps: + out_name = str(file[1][0].stem) + file_pattern + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + + processes = [] + with ProcessPoolExecutor(max_workers) as executor: + for files in fps: + file = files[1][0] + tab = tc.ConvertTabular(file, file_extension, out_dir) + if files[1][0].suffix == ".fcs": + processes.append(executor.submit(tab.fcs_to_arrow)) + elif files[1][0].suffix == ".arrow": + processes.append(executor.submit(tab.arrow_to_tabular)) + else: + processes.append(executor.submit(tab.df_to_arrow)) + + for f in tqdm( + as_completed(processes), + desc=f"converting tabular data to {file_pattern}", + total=len(processes), + ): + f.result() + + tab.remove_files() + + logger.info("Finished all processes!") + + +if __name__ == "__main__": + app() diff --git a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py b/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py new file mode 100644 index 0000000..9303907 --- /dev/null +++ b/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py @@ -0,0 +1,158 @@ +"""Tabular Converter.""" +import enum +import logging +import os +import pathlib + +import fcsparser +import vaex + +logger = logging.getLogger(__name__) + +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow") + + +class Extensions(str, enum.Enum): + """Extension types to be 
converted.""" + + FITS = ".fits" + FEATHER = ".feather" + PARQUET = ".parquet" + HDF = ".hdf5" + FCS = ".fcs" + CSV = ".csv" + ARROW = ".arrow" + Default = POLUS_TAB_EXT + + +class ConvertTabular: + """Convert vaex supported file formats into Arrow data format and vice versa. + + Args: + file: Path to input file. + file_extension : Desired ouput file extension. + out_dir: Path to save the output csv file. + """ + + def __init__( + self, file: pathlib.Path, file_extension: Extensions, out_dir: pathlib.Path + ): + """Define Instance attributes.""" + self.file = file + self.out_dir = out_dir + self.file_extension = file_extension + self.output_file = pathlib.Path( + self.out_dir, (self.file.stem + self.file_extension) + ) + + def csv_to_df(self) -> vaex.DataFrame: + """Convert csv into datafram or hdf5 file.""" + logger.info("csv_to_df: Copy csv file into out_dir for processing...") + logger.info("csv_to_df: Checking size of csv file...") + # Open csv file and count rows in file + with open(self.file, encoding="utf-8") as fr: + ncols = len(fr.readline().split(",")) + chunk_size = max([2**24 // ncols, 1]) + logger.info("csv_to_df: # of columns are: " + str(ncols)) + # Convert large csv files to hdf5 if more than 1,000,000 rows + logger.info("csv_to_df: converting file into hdf5 format") + df = vaex.from_csv(self.file, convert=True, chunk_size=chunk_size) + return df + + def binary_to_df(self) -> vaex.DataFrame: + """Convert any binary formats into vaex dataframe.""" + binary_patterns = [".fits", ".feather", ".parquet", ".hdf5", ".arrow"] + logger.info("binary_to_df: Scanning directory for binary file pattern... ") + if self.file_extension in binary_patterns: + # convert hdf5 to vaex df + df = vaex.open(self.file) + return df + else: + raise FileNotFoundError( + "No supported binary file extensions were found in the directory. Please check file directory again." + ) + + def fcs_to_arrow(self) -> None: + """Convert fcs file to csv. 
Copied from polus-fcs-to-csv-converter plugin.""" + logger.info( + "fcs_to_feather : Begin parsing data out of .fcs file" + self.file.stem + ) + # Use fcsparser to parse data into python dataframe + _, data = fcsparser.parse(self.file, meta_data_only=False, reformat_meta=True) + + # Export the fcs data to vaex df + logger.info("fcs_to_feather: converting data to vaex dataframe...") + df = vaex.from_pandas(data) + logger.info("fcs_to_feather: writing file...") + logger.info( + "fcs_to_feather: Writing Vaex Dataframe to Feather File Format for:" + + self.file.stem + ) + df.export_feather(self.output_file) + + def df_to_arrow(self) -> None: + """Convert vaex dataframe to Arrow feather file.""" + logger.info("df_to_feather: Scanning input directory files... ") + if self.file_extension == ".csv": + # convert csv to vaex df or hdf5 + df = self.csv_to_df() + else: + df = self.binary_to_df() + + logger.info("df_to_arrow: writing file...") + logger.info( + "df_to_arrow: Writing Vaex Dataframe to Feather File Format for:" + + self.file.stem + ) + df.export_feather(self.output_file) + + def remove_files(self) -> None: + """Delete intermediate files other than arrow and json files from output directory.""" + for f in self.out_dir.iterdir(): + extension_list = [ + ".arrow", + ".json", + ".feather", + ".csv", + ".hdf5", + ".fits", + ".fcs", + ".parquet", + ] + if f.suffix not in extension_list: + os.remove(f) + + logger.info("Done") + + def arrow_to_tabular(self) -> None: + """Convert Arrow file into tabular file. + + This function uses vaex to open an arrow file and converts into other vaex supported formats. + Note: At the moment [.csv, parquet, hdf5, feather] file formats are supported. 
+ """ + data = vaex.open(self.file) + logger.info("Arrow Conversion: Copy ${self.file} into outDir for processing...") + ncols = len(data) + chunk_size = max([2**24 // ncols, 1]) + logger.info("Arrow Conversion: checking for file format") + + if self.file_extension == ".csv": + logger.info("Arrow Conversion: Converting PyArrow Table into .csv file") + # Streaming contents of Arrow Table into csv + return data.export_csv(self.output_file, chunksize=chunk_size) + + elif self.file_extension == ".parquet": + logger.info("Arrow Conversion: Converting PyArrow Table into .parquet file") + return data.export_parquet(self.output_file) + + elif self.file_extension == ".hdf5": + logger.info("Arrow Conversion: Converting PyArrow Table into .hdf5") + return data.export_hdf5(self.output_file) + elif self.file_extension == ".feather": + logger.info("Arrow Conversion: Converting PyArrow Table into .hdf5") + return data.export_feather(self.output_file) + + else: + logger.error( + "Arrow Conversion Error: This format is not supported in this plugin" + ) diff --git a/formats/tabular-converter-tool/tabularconverter.cwl b/formats/tabular-converter-tool/tabularconverter.cwl new file mode 100644 index 0000000..66d7feb --- /dev/null +++ b/formats/tabular-converter-tool/tabularconverter.cwl @@ -0,0 +1,32 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + fileExtension: + inputBinding: + prefix: --fileExtension + type: string + filePattern: + inputBinding: + prefix: --filePattern + type: string? 
+ inpDir: + inputBinding: + prefix: --inpDir + type: Directory + outDir: + inputBinding: + prefix: --outDir + type: Directory +outputs: + outDir: + outputBinding: + glob: $(inputs.outDir.basename) + type: Directory +requirements: + DockerRequirement: + dockerPull: polusai/tabular-converter-tool:0.1.2-dev0 + InitialWorkDirRequirement: + listing: + - entry: $(inputs.outDir) + writable: true + InlineJavascriptRequirement: {} diff --git a/formats/tabular-converter-tool/tests/__init__.py b/formats/tabular-converter-tool/tests/__init__.py new file mode 100644 index 0000000..f8d42a1 --- /dev/null +++ b/formats/tabular-converter-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Testing of Tabular Converter plugin.""" diff --git a/formats/tabular-converter-tool/tests/test_main.py b/formats/tabular-converter-tool/tests/test_main.py new file mode 100644 index 0000000..b512a83 --- /dev/null +++ b/formats/tabular-converter-tool/tests/test_main.py @@ -0,0 +1,173 @@ +"""Testing of Tabular Converter plugin.""" +import pathlib +import random +import shutil +import string +import tempfile + +import fcsparser +import filepattern as fp +import numpy as np +import pandas as pd +import pytest +import vaex +from astropy.table import Table +from polus.images.formats.tabular_converter import tabular_converter as tc + + +class Generatedata: + """Generate tabular data with several different file format.""" + + def __init__(self, file_pattern: str) -> None: + """Define instance attributes.""" + self.dirpath = pathlib.Path.cwd() + self.inp_dir = tempfile.mkdtemp(dir=self.dirpath) + self.out_dir = tempfile.mkdtemp(dir=self.dirpath) + self.file_pattern = file_pattern + self.x = self.create_dataframe() + + def get_inp_dir(self) -> str: + """Get input directory.""" + return self.inp_dir + + def get_out_dir(self) -> str: + """Get output directory.""" + return self.out_dir + + def create_dataframe(self) -> pd.core.frame.DataFrame: + """Create Pandas dataframe.""" + return pd.DataFrame( + { + "A": 
[random.choice(string.ascii_letters) for i in range(100)], + "B": np.random.randint(low=1, high=100, size=100), + "C": np.random.normal(0.0, 1.0, size=100), + }, + ) + + def fits_func(self) -> None: + """Convert pandas dataframe to fits file format.""" + ft = Table.from_pandas(self.x) + ft.write(pathlib.Path(self.inp_dir, "data.fits")) + + def fcs_func(self) -> None: + """Get the test example of fcs data.""" + fpath = fcsparser.test_sample_path + shutil.copy(fpath, self.inp_dir) + + def csv_func(self) -> None: + """Convert pandas dataframe to csv file format.""" + self.x.to_csv(pathlib.Path(self.inp_dir, "data.csv"), index=False) + + def parquet_func(self) -> None: + """Convert pandas dataframe to parquet file format.""" + self.x.to_parquet( + pathlib.Path(self.inp_dir, "data.parquet"), + engine="auto", + compression=None, + ) + + def feather_func(self) -> None: + """Convert pandas dataframe to feather file format.""" + self.x.to_feather(pathlib.Path(self.inp_dir, "data.feather")) + + def arrow_func(self) -> None: + """Convert pandas dataframe to Arrow file format.""" + self.x.to_feather(pathlib.Path(self.inp_dir, "data.arrow")) + + def hdf_func(self) -> None: + """Convert pandas dataframe to hdf5 file format.""" + v_df = vaex.from_pandas(self.x, copy_index=False) + v_df.export(pathlib.Path(self.inp_dir, "data.hdf5")) + + def __call__(self) -> None: + """To make a class callable.""" + data_ext = { + ".hdf5": self.hdf_func, + ".csv": self.csv_func, + ".parquet": self.parquet_func, + ".feather": self.feather_func, + ".fits": self.fits_func, + ".fcs": self.fcs_func, + ".arrow": self.arrow_func, + } + + return data_ext[self.file_pattern]() + + def clean_directories(self): + """Remove files.""" + for d in self.dirpath.iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +FILE_EXT = [[".hdf5", ".parquet", ".csv", ".feather", ".fits", ".fcs", ".arrow"]] + + +@pytest.fixture(params=FILE_EXT) +def poly(request): + """To get the parameter of the 
fixture.""" + return request.param + + +def test_tabular_coverter(poly): + """Testing of vaex supported inter conversion of tabular data.""" + for i in poly: + if i not in [".fcs", ".arrow"]: + d = Generatedata(i) + d() + pattern = f".*{i}" + fps = fp.FilePattern(d.get_inp_dir(), pattern) + for file in fps(): + print(file) + tab = tc.ConvertTabular(file[1][0], ".arrow", d.get_out_dir()) + tab.df_to_arrow() + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(d.get_out_dir(), ".arrow") + ) + is True + ) + elif i == ".fcs": + d = Generatedata(".fcs") + d() + pattern = f".*{i}" + fps = fp.FilePattern(d.get_inp_dir(), pattern) + for file in fps(): + tab = tc.ConvertTabular(file[1][0], ".arrow", d.get_out_dir()) + tab.fcs_to_arrow() + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(d.get_out_dir(), ".arrow") + ) + is True + ) + + elif i == ".arrow": + d = Generatedata(".arrow") + d() + pattern = f".*{i}" + fps = fp.FilePattern(d.get_inp_dir(), pattern) + extension_list = [ + ".feather", + ".csv", + ".hdf5", + ".parquet", + ] + for ext in extension_list: + for file in fps(): + tab = tc.ConvertTabular(file[1][0], ext, d.get_out_dir()) + tab.arrow_to_tabular() + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(d.get_out_dir(), ext) + ) + is True + ) + + d.clean_directories() diff --git a/formats/tabular-to-arrow-tool/.bumpversion.cfg b/formats/tabular-to-arrow-tool/.bumpversion.cfg new file mode 100644 index 0000000..9434540 --- /dev/null +++ b/formats/tabular-to-arrow-tool/.bumpversion.cfg @@ -0,0 +1,27 @@ +[bumpversion] +current_version = 0.2.3-dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/formats/tabular_to_arrow/__init__.py] diff --git a/formats/tabular-to-arrow-tool/.gitignore b/formats/tabular-to-arrow-tool/.gitignore new file mode 100644 index 0000000..e891280 --- /dev/null +++ b/formats/tabular-to-arrow-tool/.gitignore @@ -0,0 +1,175 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +poetry.lock +../../poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# vscode +.vscode + +# test data directory +data +# yaml file +.pre-commit-config.yaml + +# hidden files +.DS_Store +.ds_store +# flake8 +.flake8 diff --git a/formats/tabular-to-arrow-tool/Dockerfile b/formats/tabular-to-arrow-tool/Dockerfile new file mode 100644 index 0000000..aab61fb --- /dev/null +++ b/formats/tabular-to-arrow-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".arrow" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.formats.tabular_to_arrow"] +CMD ["--help"] diff --git a/formats/tabular-to-arrow-tool/README.md b/formats/tabular-to-arrow-tool/README.md new file mode 100644 index 0000000..888ad67 --- 
/dev/null +++ b/formats/tabular-to-arrow-tool/README.md @@ -0,0 +1,34 @@ +# Tabular to Arrow (v0.2.0) + +This WIPP plugin allows analysts to convert various file formats received by researchers into Arrow Feather File Format (V2). This plugin supports the following file extensions: +- `fcs` +- `csv` +- `hdf5` +- `fits` +- `parquet` +- `feather` + +Contact [Kelechi Nina Mezu](mailto:nina.mezu@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`bash build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the +contents of `plugin.json` into the pop-up window and submit. + +## Options + +This plugin takes two input arguments and one output argument: + +| Name | Description | I/O | Type | +|---------------|-------------------------|--------|--------| +| `--inpDir` | Input generic data collection to be processed by this plugin | Input | collection | +| `--filePattern` | Filename pattern to convert | Input | string | +| `--outDir` | Output collection | Output | collection | +| `--preview` | Generate JSON file with outputs | Output | JSON | diff --git a/formats/tabular-to-arrow-tool/VERSION b/formats/tabular-to-arrow-tool/VERSION new file mode 100644 index 0000000..3988334 --- /dev/null +++ b/formats/tabular-to-arrow-tool/VERSION @@ -0,0 +1 @@ +0.2.3-dev0 diff --git a/formats/tabular-to-arrow-tool/build-docker.sh b/formats/tabular-to-arrow-tool/build-docker.sh new file mode 100755 index 0000000..194e843 --- /dev/null +++ b/formats/tabular-to-arrow-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(","hamshkhawar "] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer 
= "^0.7.0" +tqdm = "^4.64.1" +pyarrow = "^11.0.0" +blake3 = "^0.3.3" +fcsparser = "^0.2.4" +llvmlite = "^0.39.1" +fastapi = "^0.92.0" +vaex = "^4.7.0" + + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/formats/tabular-to-arrow-tool/run-plugin.sh b/formats/tabular-to-arrow-tool/run-plugin.sh new file mode 100755 index 0000000..6dc39b7 --- /dev/null +++ b/formats/tabular-to-arrow-tool/run-plugin.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +#!/bin/bash +version=$( None: + """Execute Main function.""" + logger.info(f"inpDir = {inp_dir}") + logger.info(f"outDir = {out_dir}") + logger.info(f"filePattern = {file_pattern}") + + assert inp_dir.exists(), f"{inp_dir} doesnot exist!! Please check input path again" + assert out_dir.exists(), f"{out_dir} doesnot exist!! Please check output path again" + + if file_pattern is None: + file_pattern = ".*" + else: + file_pattern = "".join([".*", file_pattern]) + + fps = fp.FilePattern(inp_dir, file_pattern) + + if preview: + with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": file_pattern, + "outDir": [], + } + for file in fps: + out_name = str(file[1][0].stem) + POLUS_TAB_EXT + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + + processes = [] + with ProcessPoolExecutor(max_workers) as executor: + for files in fps: + file = files[1][0] + if file_pattern == ".*.fcs": + processes.append(executor.submit(tb.fcs_to_arrow, file, out_dir)) + else: + processes.append( + executor.submit(tb.df_to_arrow, file, file_pattern, out_dir) + ) + + for f in tqdm( + as_completed(processes), + desc=f"converting tabular data to {POLUS_TAB_EXT}", + total=len(processes), + ): + f.result() + + tb.remove_files(out_dir) + + logger.info("Finished all processes!") + + +if 
__name__ == "__main__": + app() diff --git a/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py b/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py new file mode 100644 index 0000000..1dfd452 --- /dev/null +++ b/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py @@ -0,0 +1,131 @@ +"""Tabular to Arrow.""" +import logging +import os +import pathlib + +import fcsparser +import vaex + +logger = logging.getLogger(__name__) + +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow") + + +def csv_to_df(file: pathlib.Path, out_dir: pathlib.Path) -> vaex.DataFrame: + """Convert csv into datafram or hdf5 file. + + Args: + file: Path to input file. + out_dir: Path to save the output csv file. + + Returns: + Vaex dataframe + + """ + logger.info("csv_to_df: Copy csv file into out_dir for processing...") + + logger.info("csv_to_df: Checking size of csv file...") + # Open csv file and count rows in file + with open(file, encoding="utf-8") as fr: + ncols = len(fr.readline().split(",")) + + chunk_size = max([2**24 // ncols, 1]) + logger.info("csv_to_df: # of columns are: " + str(ncols)) + + # Convert large csv files to hdf5 if more than 1,000,000 rows + logger.info("csv_to_df: converting file into hdf5 format") + df = vaex.from_csv(file, convert=True, chunk_size=chunk_size) + + return df + + +def binary_to_df(file: pathlib.Path, file_pattern: str) -> vaex.DataFrame: + """Convert any binary formats into vaex dataframe. + + Args: + file: Path to input file. + file_pattern : extension of file to convert. + + Returns: + Vaex dataframe. + Raises: + FileNotFoundError: An error occurred if input directory contains file extensions which are not supported by this plugin. + + """ + binary_patterns = [".*.fits", ".*.feather", ".*.parquet", ".*.hdf5", ".*.h5"] + + logger.info("binary_to_df: Scanning directory for binary file pattern... 
") + if file_pattern in binary_patterns: + # convert hdf5 to vaex df + df = vaex.open(file) + return df + else: + raise FileNotFoundError( + "No supported binary file extensions were found in the directory. Please check file directory again." + ) + + +def fcs_to_arrow(file: pathlib.Path, out_dir: pathlib.Path) -> None: + """Convert fcs file to csv. Copied from polus-fcs-to-csv-converter plugin. + + Args: + file: Path to the directory containing the fcs file. + out_dir: Path to save the output csv file. + + """ + file_name = file.stem + outname = file_name + POLUS_TAB_EXT + outputfile = out_dir.joinpath(outname) + logger.info("fcs_to_feather : Begin parsing data out of .fcs file" + file_name) + + # Use fcsparser to parse data into python dataframe + _, data = fcsparser.parse(file, meta_data_only=False, reformat_meta=True) + + # Export the fcs data to vaex df + logger.info("fcs_to_feather: converting data to vaex dataframe...") + df = vaex.from_pandas(data) + logger.info("fcs_to_feather: writing file...") + logger.info( + "fcs_to_feather: Writing Vaex Dataframe to Feather File Format for:" + file_name + ) + df.export_feather(outputfile) + + +def df_to_arrow(file: pathlib.Path, file_pattern: str, out_dir: pathlib.Path) -> None: + """Convert vaex dataframe to Arrow feather file. + + Args: + file: Path to the directory to grab file. + file_pattern: File extension. + out_dir: Path to the directory to save feather file. + """ + file_name = file.stem + outname = file_name + POLUS_TAB_EXT + outputfile = out_dir.joinpath(outname) + + logger.info("df_to_feather: Scanning input directory files... 
") + if file_pattern == ".*.csv": + # convert csv to vaex df or hdf5 + df = csv_to_df(file, out_dir) + else: + df = binary_to_df(file, file_pattern) + + logger.info("df_to_arrow: writing file...") + logger.info( + "df_to_arrow: Writing Vaex Dataframe to Feather File Format for:" + file_name + ) + df.export_feather(outputfile) + + +def remove_files(out_dir: pathlib.Path) -> None: + """Delete intermediate files other than arrow and json files from output directory. + + Args: + out_dir: Path to the output directory. + + """ + for f in out_dir.iterdir(): + if f.suffix not in [".arrow", ".json"]: + os.remove(f) + + logger.info("Done") diff --git a/formats/tabular-to-arrow-tool/tabulartoarrow.cwl b/formats/tabular-to-arrow-tool/tabulartoarrow.cwl new file mode 100644 index 0000000..10eb175 --- /dev/null +++ b/formats/tabular-to-arrow-tool/tabulartoarrow.cwl @@ -0,0 +1,28 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + filePattern: + inputBinding: + prefix: --filePattern + type: string + inpDir: + inputBinding: + prefix: --inpDir + type: Directory + outDir: + inputBinding: + prefix: --outDir + type: Directory +outputs: + outDir: + outputBinding: + glob: $(inputs.outDir.basename) + type: Directory +requirements: + DockerRequirement: + dockerPull: polusai/tabular-to-arrow-tool:0.2.3-dev0 + InitialWorkDirRequirement: + listing: + - entry: $(inputs.outDir) + writable: true + InlineJavascriptRequirement: {} diff --git a/formats/tabular-to-arrow-tool/tests/__init__.py b/formats/tabular-to-arrow-tool/tests/__init__.py new file mode 100644 index 0000000..04f992e --- /dev/null +++ b/formats/tabular-to-arrow-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Pytests of Tabular to Arrow plugin.""" diff --git a/formats/tabular-to-arrow-tool/tests/test_main.py b/formats/tabular-to-arrow-tool/tests/test_main.py new file mode 100644 index 0000000..b2ca218 --- /dev/null +++ b/formats/tabular-to-arrow-tool/tests/test_main.py @@ -0,0 +1,138 @@ +"""Testing of Tabular to Arrow plugin.""" 
+import os +import pathlib +import random +import shutil +import string +import typing + +import fcsparser +import filepattern as fp +import numpy as np +import pandas as pd +import pytest +import vaex +from astropy.table import Table +from polus.images.formats.tabular_to_arrow import tabular_arrow_converter as tb + + +class Generatedata: + """Generate tabular data with several different file format.""" + + def __init__(self, file_pattern: str) -> None: + """Define instance attributes.""" + self.dirpath = os.path.abspath(os.path.join(__file__, "../..")) + self.inp_dir = pathlib.Path(self.dirpath, "data/input") + if not self.inp_dir.exists(): + self.inp_dir.mkdir(exist_ok=True, parents=True) + self.out_dir = pathlib.Path(self.dirpath, "data/output") + if not self.out_dir.exists(): + self.out_dir.mkdir(exist_ok=True, parents=True) + self.file_pattern = file_pattern + self.x = self.create_dataframe() + + def get_inp_dir(self) -> typing.Union[str, os.PathLike]: + """Get input directory.""" + return self.inp_dir + + def get_out_dir(self) -> typing.Union[str, os.PathLike]: + """Get output directory.""" + return self.out_dir + + def create_dataframe(self) -> pd.core.frame.DataFrame: + """Create Pandas dataframe.""" + return pd.DataFrame( + { + "A": [random.choice(string.ascii_letters) for i in range(100)], + "B": np.random.randint(low=1, high=100, size=100), + "C": np.random.normal(0.0, 1.0, size=100), + }, + ) + + def fits_func(self) -> None: + """Convert pandas dataframe to fits file format.""" + ft = Table.from_pandas(self.x) + ft.write(pathlib.Path(self.inp_dir, "data.fits"), overwrite=True) + + def fcs_func(self) -> None: + """Get the test example of fcs data.""" + fpath = fcsparser.test_sample_path + shutil.copy(fpath, self.inp_dir) + + def csv_func(self) -> None: + """Convert pandas dataframe to csv file format.""" + self.x.to_csv(pathlib.Path(self.inp_dir, "data.csv"), index=False) + + def parquet_func(self) -> None: + """Convert pandas dataframe to parquet file 
format.""" + self.x.to_parquet( + pathlib.Path(self.inp_dir, "data.parquet"), + engine="auto", + compression=None, + ) + + def feather_func(self) -> None: + """Convert pandas dataframe to feather file format.""" + self.x.to_feather(pathlib.Path(self.inp_dir, "data.feather")) + + def hdf_func(self) -> None: + """Convert pandas dataframe to hdf5 file format.""" + v_df = vaex.from_pandas(self.x, copy_index=False) + v_df.export(pathlib.Path(self.inp_dir, "data.hdf5")) + + def __call__(self) -> None: + """To make a class callable.""" + data_ext = { + ".hdf5": self.hdf_func, + ".csv": self.csv_func, + ".parquet": self.parquet_func, + ".feather": self.feather_func, + ".fits": self.fits_func, + ".fcs": self.fcs_func, + } + + return data_ext[self.file_pattern]() + + +FILE_EXT = [[".hdf5", ".parquet", ".csv", ".feather", ".fits", ".fcs"]] + + +@pytest.fixture(params=FILE_EXT) +def poly(request): + """To get the parameter of the fixture.""" + return request.param + + +def test_tabular_to_arrow(poly): + """Testing of tabular data conversion to arrow file format.""" + for i in poly: + if i != ".fcs": + d = Generatedata(i) + d() + file_pattern = f".*{i}" + fps = fp.FilePattern(d.get_inp_dir(), file_pattern) + for file in fps(): + tb.df_to_arrow(file[1][0], file_pattern, d.get_out_dir()) + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(d.get_out_dir(), ".arrow") + ) + is True + ) + else: + d = Generatedata(".fcs") + d() + file_pattern = ".*.fcs" + fps = fp.FilePattern(d.get_out_dir(), file_pattern) + for file in fps(): + tb.fcs_to_arrow(file[1][0], d.get_out_dir()) + + assert ( + all( + file[1][0].suffix + for file in fp.FilePattern(d.get_out_dir(), ".arrow") + ) + is True + ) diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 0000000..dc282ac --- /dev/null +++ b/noxfile.py @@ -0,0 +1,26 @@ +"""Nox automation file.""" + +from nox import Session, session + +python_versions = ["3.9"] + + +@session(python=["3.9"]) +def export_ts(session: Session) -> 
None: + """Export Pydantic model as TypeScript object.""" + session.install("-r", "requirements-dev.txt") + + session.run( + "datamodel-codegen", + "--input", + "./polus/_plugins/models/PolusComputeSchema.json", + "--output", + "./polus/_plugins/models/PolusComputeSchema.py", + ) + session.run( + "pydantic2ts", + "--module", + "./polus/_plugins/models/PolusComputeSchema.py", + "--output", + "./polus/_plugins/models/PolusComputeSchema.ts", + ) diff --git a/package.json b/package.json new file mode 100644 index 0000000..5c80de7 --- /dev/null +++ b/package.json @@ -0,0 +1,30 @@ +{ + "name": "@polusai/tabular-tools", + "version": "0.1.0", + "description": "Monorepo for generic WIPP plugins", + "scripts": {}, + "repository": { + "type": "git", + "url": "git+https://github.com/polusai/tabular-tools.git" + }, + "license": "MIT", + "bugs": { + "url": "https://github.com/polusai/tabular-tools/issues" + }, + "homepage": "https://github.com/polusai/tabular-tools#readme", + "devDependencies": { + "@commitlint/cli": "^8.2.0", + "@commitlint/config-conventional": "^8.2.0", + "husky": "^3.0.8" + }, + "husky": { + "hooks": { + "commit-msg": "commitlint -E HUSKY_GIT_PARAMS" + } + }, + "commitlint": { + "extends": [ + "@commitlint/config-conventional" + ] + } +} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..e05d93b --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,63 @@ +[tool.poetry] +authors = ["Nicholas Schaub ", "Camilo Velez "] +description = "Python API to configure and run Polus Plugins." 
+license = "License :: OSI Approved :: MIT License" +maintainers = ["Camilo Velez "] +name = "polus-plugins" +packages = [{include = "polus", from = "src"}] +readme = "README.md" +repository = "https://github.com/polusai/tabular-tools" +version = "0.1.1" + +[tool.poetry.dependencies] +python = ">=3.9, <3.12" + +click = "^8.1.3" +cwltool = "^3.1.20230513155734" +fsspec = "^2023.6.0" +pydantic = ">=1.10.0" +pygithub = "^1.58.2" +python-on-whales = "^0.68.0" +pyyaml = "^6.0" +tqdm = "^4.65.0" +validators = "^0.22.0" +xmltodict = "^0.13.0" + +[tool.poetry.group.dev.dependencies] +python = ">=3.9, <3.12" + +black = "^23.3.0" +bump2version = "^1.0.1" +datamodel-code-generator = "^0.23.0" +flake8 = "^6.0.0" +fsspec = "^2023.1.0" +mypy = "^1.4.0" +nox = "^2022.11.21" +poetry = "^1.3.2" +pre-commit = "^3.3.3" +pydantic = ">=1.10" +pytest = "^7.3.2" +pytest-benchmark = "^4.0.0" +pytest-cov = "^4.1.0" +pytest-sugar = "^0.9.7" +pytest-xdist = "^3.3.1" +python-on-whales = "^0.68.0" +pyyaml = "^6.0" +ruff = "^0.0.274" +tqdm = "^4.64.1" +xmltodict = "^0.13.0" + +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core"] + +[tool.isort] +profile = "black" + +[tool.pytest.ini_options] +addopts = [ + "--import-mode=importlib", +] +markers = [ + "repo: marks tests that validate plugin.json files in local repo", +] diff --git a/to_clt.py b/to_clt.py new file mode 100644 index 0000000..a2dd9b9 --- /dev/null +++ b/to_clt.py @@ -0,0 +1,108 @@ +# ruff: noqa +"""Script to convert all WIPP manifests to CLT. + +This script will first convert all WIPP manifests to ICT and then to CLT. +WIPP -> ICT -> CLT. 
+""" + +# pylint: disable=W0718, W1203 +import logging +from pathlib import Path + +import typer +from ict import ICT +from tqdm import tqdm + +app = typer.Typer(help="Convert WIPP manifests to ICT.") +ict_logger = logging.getLogger("ict") +fhandler = logging.FileHandler("clt_conversion.log") +fformat = logging.Formatter( + "%(asctime)s - %(levelname)s - %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p" +) +fhandler.setFormatter(fformat) +fhandler.setLevel("INFO") +ict_logger.setLevel("INFO") +ict_logger.addHandler(fhandler) +ict_logger.setLevel(logging.INFO) +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt="%m/%d/%Y %I:%M:%S %p", +) +logger = logging.getLogger("wipp_to_clt") +logger.addHandler(fhandler) + +REPO_PATH = Path(__file__).parent +LOCAL_MANIFESTS = list(REPO_PATH.rglob("*plugin.json")) +logger.info(f"Found {len(LOCAL_MANIFESTS)} manifests in {REPO_PATH}") +IGNORE_LIST = ["cookiecutter", ".env", "Shared-Memory-OpenMP"] +# Shared-Memory-OpenMP ignored for now until version +# and container are fixed in the manifest +LOCAL_MANIFESTS = [ + x for x in LOCAL_MANIFESTS if not any(ig in str(x) for ig in IGNORE_LIST) +] + + +@app.command() +def main( + all_: bool = typer.Option( + False, + "--all", + "-a", + help="Convert all manifests in the repository.", + ), + name: str = typer.Option( + None, + "--name", + "-n", + help="Name of the plugin to convert.", + ), +) -> None: + """Convert WIPP manifests to ICT.""" + problems = {} + converted = 0 + if not all_ and name is None: + logger.error("Please provide a name if not converting all manifests.") + raise typer.Abort + if name is not None: + if all_: + logger.warning("Ignoring --all flag since a name was provided.") + logger.info(f"name: {name}") + all_ = False + logger.info(f"all: {all_}") + if all_: + n = len(LOCAL_MANIFESTS) + for manifest in tqdm(LOCAL_MANIFESTS): + try: + ict_ = ICT.from_wipp(manifest) + ict_name = ( + ict_.name.split("/")[-1].lower() + 
".cwl" # pylint: disable=E1101 + ) + ict_.save_clt(manifest.with_name(ict_name)) + + converted += 1 + + except BaseException as e: + problems[Path(manifest).parts[4:-1]] = str(e) + if name is not None: + n = 1 + for manifest in [x for x in LOCAL_MANIFESTS if name in str(x)]: + try: + ict_ = ICT.from_wipp(manifest) + ict_name = ( + ict_.name.split("/")[-1].lower() + ".cwl" # pylint: disable=E1101 + ) + ict_.save_clt(manifest.with_name(ict_name)) + converted += 1 + + except BaseException as e: + problems[Path(manifest).parts[4:-1]] = str(e) + + logger.info(f"Converted {converted}/{n} plugins") + if len(problems) > 0: + logger.error(f"Problems: {problems}") + logger.info(f"There were {len(problems)} problems in {n} manifests.") + + +if __name__ == "__main__": + app() diff --git a/to_ict.py b/to_ict.py new file mode 100644 index 0000000..fcb858d --- /dev/null +++ b/to_ict.py @@ -0,0 +1,99 @@ +# ruff: noqa +"""Script to convert all WIPP manifests to ICT.""" + +# pylint: disable=W0718, W1203 +import logging +from pathlib import Path + +import typer +from ict import ICT, validate +from tqdm import tqdm + +app = typer.Typer(help="Convert WIPP manifests to ICT.") +ict_logger = logging.getLogger("ict") +fhandler = logging.FileHandler("ict_conversion.log") +fformat = logging.Formatter( + "%(asctime)s - %(levelname)s - %(message)s", datefmt="%m/%d/%Y %I:%M:%S %p" +) +fhandler.setFormatter(fformat) +fhandler.setLevel("INFO") +ict_logger.setLevel("INFO") +ict_logger.addHandler(fhandler) +ict_logger.setLevel(logging.INFO) +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", + datefmt="%m/%d/%Y %I:%M:%S %p", +) +logger = logging.getLogger("wipp_to_ict") +logger.addHandler(fhandler) + +REPO_PATH = Path(__file__).parent +LOCAL_MANIFESTS = list(REPO_PATH.rglob("*plugin.json")) +logger.info(f"Found {len(LOCAL_MANIFESTS)} manifests in {REPO_PATH}") +IGNORE_LIST = ["cookiecutter", ".env", "Shared-Memory-OpenMP"] +# Shared-Memory-OpenMP 
@app.command()
def main(
    all_: bool = typer.Option(
        False,
        "--all",
        "-a",
        help="Convert all manifests in the repository.",
    ),
    name: str = typer.Option(
        None,
        "--name",
        "-n",
        help="Name of the plugin to convert.",
    ),
) -> None:
    """Convert WIPP manifests to ICT and validate the generated YAML.

    Args:
        all_: Convert every manifest found in the repository.
        name: Convert only manifests whose path contains this substring.
    """
    problems = {}
    converted = 0
    if not all_ and name is None:
        logger.error("Please provide a name if not converting all manifests.")
        raise typer.Abort()  # instantiate; the original raised the bare class
    if name is not None:
        if all_:
            logger.warning("Ignoring --all flag since a name was provided.")
        logger.info(f"name: {name}")
        all_ = False
    logger.info(f"all: {all_}")

    # Select the manifests once; the original duplicated the whole conversion
    # loop for the --all and --name paths.
    targets = (
        LOCAL_MANIFESTS if all_ else [x for x in LOCAL_MANIFESTS if name in str(x)]
    )
    # Report the true number of attempted conversions; the original logged
    # "Converted k/1" for --name even when several manifests matched.
    n = len(targets)

    for manifest in tqdm(targets):
        try:
            ict_ = ICT.from_wipp(manifest)
            yaml_path = ict_.save_yaml(manifest.with_name("ict.yaml"))
            validate(yaml_path)
            converted += 1
        # Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception still records every conversion failure.
        except Exception as e:
            problems[Path(manifest).parts[4:-1]] = str(e)

    logger.info(f"Converted {converted}/{n} plugins")
    if len(problems) > 0:
        logger.error(f"Problems: {problems}")
        logger.info(f"There were {len(problems)} problems in {n} manifests.")
row or column merging. + +**If row merging**, csv files are assumed to have headers (column titles) in the first row. If headers are not the same between all files, csv files that don't have a specific column header will have the column filled with 'NaN' values. A column titled `file` is created in the output file, and this contains the name of the original input csv file associated with the row of data. **This plugin creates a csvCollection with a single csv file.** + +**If column merging**, it is assumed that all files have a column titled `file` that is used to merge columns across csv files. If some files have a `file` column value that does not match another csv file, then a new row is generated with the specified value in `file` and missing column values are filled with `NaN` values. **This plugin creates a csvCollection with a single csv file.** + +**When column merging, if sameRows==true**, then no `file` column needs to be present. All files with the same number of columns will be merged into one csv file. **This plugin creates a csvCollection with as many csv files as there are unique numbers of rows in the csv collection.** + +If `stripExtension` is set to true, then the `.csv` file extension is removed from the file name in the `file` column. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes two input argument and one output argument: + +| Name | Description | I/O | Type | +|--------------------|------------------------------------------------------------|--------|---------------| +| `--inpDir` | Input image collection to be processed by this plugin | Input | collection | +| `--stripExtension` | Should csv be removed from the filename in the output file | Input | boolean | +| `--dim` | Perform `rows` or `columns` merger | Input | string | +| `--sameRows` | Only merge csv files with the same number of rows? | Input | boolean | +| `--outDir` | Output csv file | Output | csvCollection | + diff --git a/transforms/polus-csv-merger-plugin/VERSION b/transforms/polus-csv-merger-plugin/VERSION new file mode 100644 index 0000000..60a2d3e --- /dev/null +++ b/transforms/polus-csv-merger-plugin/VERSION @@ -0,0 +1 @@ +0.4.0 \ No newline at end of file diff --git a/transforms/polus-csv-merger-plugin/build-docker.sh b/transforms/polus-csv-merger-plugin/build-docker.sh new file mode 100755 index 0000000..758b23c --- /dev/null +++ b/transforms/polus-csv-merger-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$( 0) { + for (i in 1:length(excludes)) { + if(!excludes[i] %in% colnames(dataset)) { + logwarn('column to exclude from %s is not found',file_name) + } + } + datasub <-dataset[ , !(names(dataset) %in% excludes)] + } + else if(length(excludes) == 0) { + datasub <-dataset + } + # Remove columns with all values as zero + datasub <- datasub[colSums(datasub) > 0] + + #Check whether predict column is present in dataframe + if(!(predictcolumn %in% colnames(datasub))) { + logwarn('predict column name is not found in %s',file_name) + next + } + + #Get column names without predict variable + drop_dep <- datasub[ , !(names(datasub) %in% predictcolumn)] + resp_var <- colnames(drop_dep) + + #Number of cores + num_of_cores = detectCores() + loginfo('Cores = %s', num_of_cores) + + #Chunk Size + chunk <- 
floor((nrow(datasub)/ncol(datasub))*num_of_cores) + + #Function to determine chunks + make.data<-function(formula,data,chunksize,...){ + n<-nrow(data) + cursor<-0 + datafun<-function(reset=FALSE){ + if (reset){ + cursor<<-0 + return(NULL) + } + if (cursor>=n) + return(NULL) + start<-cursor+1 + cursor<<-cursor+min(chunksize, n-cursor) + data[start:cursor,] + } + } + + #Convert to ffdf object + datasub_ff = as.ffdf(datasub) + + #Chunk data + chunk_data <-make.data(formula(paste(predictcolumn,paste(resp_var,collapse= "+"),sep="~")), datasub_ff, chunksize=chunk) + + if((modeltype == 'Gaussian') || (modeltype == 'Poisson') || (modeltype == 'Binomial') || (modeltype == 'Quasibinomial') || (modeltype == 'Quasipoisson') || (modeltype == 'Quasi')) { + modeltype <- tolower(modeltype) + } + + if (modeltype == 'NegativeBinomial') { + fit <- glm.nb(as.formula(paste(predictcolumn,1,sep="~")), data = datasub) + mu <- exp(coef(fit)) + val_pred<-eval(parse(text=paste('datasub',predictcolumn, sep = "$"))) + theta_val = theta.ml(val_pred, mu,nrow(datasub), limit = 22, eps = .Machine$double.eps^0.25, trace = FALSE) + } + + model_list <- c('gaussian','Gamma', 'binomial', 'poisson', 'quasi', 'quasibinomial', 'quasipoisson' ) + + model_data <- function(pred_var, data_model) { + if((modeltype %in% model_list)) { + reg_model <- bigglm(formula(paste(predictcolumn,paste(pred_var,collapse= "+"),sep="~")), data = data_model, family = eval(parse(text=paste(modeltype,"()", sep = ""))), chunksize = chunk) + } + else if(modeltype == 'NegativeBinomial') { + reg_model <- bigglm(formula(paste(predictcolumn,paste(pred_var,collapse= "+"),sep="~")), data = data_model, family = negative.binomial(theta= theta_val), chunksize=chunk) + } + else if(modeltype == 'Multinomial') { + reg_model <- multinom(formula(paste(paste("as.factor(",predictcolumn,")"),paste(pred_var,collapse= "+"),sep="~")), data = data_model, maxit=10, MaxNWts = 10000) + } + return(reg_model) + } + + #Model data based on the options 
selected + #Get only main effects of the variables + if (glmmethod == 'PrimaryFactors') { + if (modeltype != 'Multinomial') { + test_glm<- model_data(resp_var,chunk_data) + } + else if (modeltype == 'Multinomial') { + test_glm<- model_data(resp_var,datasub_ff) + } + } + #Get interaction values + else if (glmmethod == 'Interaction') { + datasub_pred <- datasub[ , !(names(datasub) %in% predictcolumn)] + #Get correlation between variables + tmp <- cor(datasub_pred) + tmp[upper.tri(tmp)] <- 0 + diag(tmp) <- 0 + + #Remove variables with no interaction + data_no_int <- which(tmp >= 0.1 | tmp < -0.1, arr.ind = TRUE) + data_frame<-data.frame(row = rownames(data_no_int), col = colnames(tmp)[data_no_int[, "col"]], + value = tmp[tmp >= 0.1 | tmp < -0.1]) + colnames(data_frame)<- c("variable1","variable2","coef") + + #Interaction variables + data_frame$variableint <- paste(data_frame$variable1, data_frame$variable2, sep="*") + data_list <- as.character(data_frame$variableint) + if (modeltype != 'Multinomial') { + test_glm<- model_data(data_list,chunk_data) + } + else if (modeltype == 'Multinomial') { + test_glm<- model_data(data_list, datasub_ff) + } + } + #Get second order polynomial values + else if (glmmethod == 'SecondOrder') { + var_resp <- paste('poly(',resp_var,',2)') + if (modeltype != 'Multinomial') { + test_glm<- model_data(var_resp,chunk_data) + } + else if (modeltype == 'Multinomial') { + test_glm<- model_data(var_resp,datasub_ff) + } + } + + #Set output directory + setwd(csvfile) + file_save <- paste0(file_name,".csv") + + #Convert summary of the analysis to a dataframe + tidy_summary <- tidy(test_glm) + + #Reorder the columns + tidy_final <- tidy_summary[c("term", "p.value", "estimate","std.error")] + colnames(tidy_final) <- c("Factors","P-Value","Estimate","Std.Error") + + #Write the dataframe to csv file + write.csv(tidy_final, file_save) + } +} \ No newline at end of file diff --git a/transforms/tabular-thresholding-tool/.bumpversion.cfg 
b/transforms/tabular-thresholding-tool/.bumpversion.cfg new file mode 100644 index 0000000..695dc97 --- /dev/null +++ b/transforms/tabular-thresholding-tool/.bumpversion.cfg @@ -0,0 +1,27 @@ +[bumpversion] +current_version = 0.1.6-dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/transforms/tabular/tabular_thresholding/__init__.py] diff --git a/transforms/tabular-thresholding-tool/.gitignore b/transforms/tabular-thresholding-tool/.gitignore new file mode 100644 index 0000000..e891280 --- /dev/null +++ b/transforms/tabular-thresholding-tool/.gitignore @@ -0,0 +1,175 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +poetry.lock +../../poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ + +# vscode +.vscode + +# test data directory +data +# yaml file +.pre-commit-config.yaml + +# hidden files +.DS_Store +.ds_store +# flake8 +.flake8 diff --git a/transforms/tabular-thresholding-tool/Dockerfile b/transforms/tabular-thresholding-tool/Dockerfile new file mode 100644 index 0000000..9943e2e --- /dev/null +++ b/transforms/tabular-thresholding-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.transforms.tabular.tabular_thresholding"] +CMD ["--help"] diff --git a/transforms/tabular-thresholding-tool/README.md b/transforms/tabular-thresholding-tool/README.md 
new file mode 100644 index 0000000..f79831f --- /dev/null +++ b/transforms/tabular-thresholding-tool/README.md @@ -0,0 +1,47 @@ +# Tabular Thresholding Plugin (v0.1.3) +This plugin uses three [threshold methods](https://github.com/nishaq503/thresholding.git) to compute threshold values on a user-defined variable and then determines if each label (ROI) is above or below the calculated threshold value. A new feature column will be computed for selected threshold method with the values in binary format (0, 1) \ +*0* `negative or below threshold`\ +*1* `positive or above threshold` + +## Threshold methods + +### *1-* False Positive Rate +It estimates mean and standard deviation of `negControl` values based on the assumption that it follows a single guassian distribution and computes threshold such that the area to the right is equal to a user-defined `falsePositiverate`. Values must range between 0 and 1 + +### *2-* OTSU +It computes threshold by using `negControl` and `posControl` values to minimize the weighted variance of these two classes. `numBins` are number of bins to compute histogram of `negControl` and `posControl` values + +### *3-* MEAN+Sigma +It computes threshold by calculating mean and `n` number of standard deviations of `negControl` values. + +Contact [Hamdah Shafqat Abbasi](mailto: hamdah.abbasi@axleinfo.com) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the +contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes 10 input arguments and one output argument: + +| Name | Description | I/O | Type | +|-------------------------|-----------------------------------------------------|--------|---------------| +| `--inpDir` | Input directory containing tabular data CSVs | Input | genericData | +| `--filePattern` | Pattern to parse tabular files | Input | string | +| `--negControl` | FeatureName describing non treated wells/ROI | Input | string | +| `--posControl` | FeatureName describing treated wells/ROI | Input | string | +| `--varName` | FeatureName for thresholding | Input | string | +| `--thresholdType` | See above in README | Input | enum | +| `--falsePositiverate` | Area to the right of the threshold | Input | float | +| `--numBins` | Number of bins for histogram | Input | number | +| `--n` | Number of standard deviation | Input | number | +| `--outFormat` | Output file format | Input | enum | +| `--outDir` | Output collection | Output | genericData | diff --git a/transforms/tabular-thresholding-tool/VERSION b/transforms/tabular-thresholding-tool/VERSION new file mode 100644 index 0000000..9518919 --- /dev/null +++ b/transforms/tabular-thresholding-tool/VERSION @@ -0,0 +1 @@ +0.1.6-dev0 diff --git a/transforms/tabular-thresholding-tool/build-docker.sh b/transforms/tabular-thresholding-tool/build-docker.sh new file mode 100644 index 0000000..45824a8 --- /dev/null +++ b/transforms/tabular-thresholding-tool/build-docker.sh @@ -0,0 +1,2 @@ +version=$(", +"Najib Ishaq " +] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +blake3 = "^0.3.3" +llvmlite = "^0.39.1" +fastapi = "^0.92.0" +astropy = "5.2.1" +vaex = "^4.17.0" + + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" + +[build-system] +requires = 
["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/transforms/tabular-thresholding-tool/run-plugin.sh b/transforms/tabular-thresholding-tool/run-plugin.sh new file mode 100755 index 0000000..55e02a5 --- /dev/null +++ b/transforms/tabular-thresholding-tool/run-plugin.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# version=$( None: + """Calculate binary thresholds for tabular data.""" + starttime = time.time() + logger.info(f"inpDir = {inp_dir}") + logger.info(f"outDir = {out_dir}") + logger.info(f"filePattern = {file_pattern}") + logger.info(f"negControl = {neg_control}") + logger.info(f"posControl = {pos_control}") + logger.info(f"varName = {var_name}") + logger.info(f"thresholdType = {threshold_type}") + logger.info(f"falsePositiverate = {false_positive_rate}") + logger.info(f"numBins = {num_bins}") + logger.info(f"n = {n}") + logger.info(f"outFormat = {out_format}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + + assert inp_dir.exists(), f"{inp_dir} doesnot exists!! Please check input path again" + assert ( + out_dir.exists() + ), f"{out_dir} doesnot exists!! 
Please check output path again" + # By default it ingests all input files if not file_pattern is defined + file_pattern = ".*" + file_pattern + + fps = fp.FilePattern(inp_dir, file_pattern) + + if preview: + with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[Union[str, List], Any] = { + "filepattern": file_pattern, + "outDir": [], + } + for file in fps: + out_name = str(file[1][0].name.split(".")[0]) + "_binary" + out_format + thr_json = str(file[1][0].name.split(".")[0]) + "_thresholds.json" + out_json["outDir"].append(out_name) + out_json["outDir"].append(thr_json) + + json.dump(out_json, jfile, indent=2) + + num_workers = max(multiprocessing.cpu_count() // 2, 2) + + flist = [f[1][0] for f in fps] + logger.info(f"Number of tabular files detected: {len(flist)}, filenames: {flist}") + assert len(flist) != 0, f"No tabular file is detected: {flist}" + + with multiprocessing.Pool(processes=num_workers) as executor: + executor.map( + partial( + tt.thresholding_func, + neg_control, + pos_control, + var_name, + threshold_type, + false_positive_rate, + num_bins, + n, + out_format, + out_dir, + ), + flist, + ) + executor.close() + executor.join() + + # Deleting intermediate files from input directory + for f in inp_dir.iterdir(): + if f.is_file() and file_pattern != ".*.hdf5": + if f.suffix in [".hdf5", ".yaml"]: + os.remove(f) + else: + if ".hdf5.hdf5" in f.name or f.suffix == ".yaml": + os.remove(f) + + endtime = round((time.time() - starttime) / 60, 3) + logger.info(f"Time taken to process binary threhold CSVs: {endtime} minutes!!!") + return + + +if __name__ == "__main__": + app() diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py b/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py new file mode 100644 index 0000000..d00ec2b --- /dev/null +++ 
def thresholding_func(
    neg_control: str,
    pos_control: str,
    var_name: str,
    threshold_type: Methods,
    false_positive_rate: Optional[float],
    num_bins: Optional[int],
    n: Optional[int],
    out_format: Extensions,
    out_dir: pathlib.Path,
    file: pathlib.Path,
) -> None:
    """Compute variable thresholds using negative (and positive) control data.

    Labels each value of ``var_name`` as 0/1 depending on whether it is below
    or above the computed threshold.  Which control data is used depends on
    the thresholding method; see https://github.com/nishaq503/thresholding.git.

    Args:
        neg_control: Column name flagging non-treated wells.
        pos_control: Column name flagging wells with the known treatment.
        var_name: Column name to compute thresholds for.
        threshold_type: Name of the threshold method.
        false_positive_rate: Tuning parameter for the FPR method.
        num_bins: Number of histogram bins for the Otsu method.
        n: Number of standard deviations from the mean (n-sigma method).
        out_format: Output file extension.
        out_dir: Output directory.
        file: Input tabular file.
    """
    chunk_size = 100_000
    if file.suffix == ".csv":
        df = vaex.from_csv(file, convert=True, chunk_size=chunk_size)
    else:
        df = vaex.open(file, convert=True, progress=True)

    # BUG FIX: the original used `any(...)`, which passed as soon as ONE of
    # the three required columns was present.  All three must exist.
    missing = [c for c in (var_name, neg_control, pos_control) if c not in df.columns]
    if missing:
        raise ValueError(
            f"Missing {missing} column names in tabular data file. "
            "Please do check variables again!!"
        )

    if df.shape == (0, 0):
        raise ValueError(
            f"File {file} is not loaded properly! Please do check input files again"
        )

    if pos_control is None:
        logger.info(
            "Otsu threshold will not be computed as it requires information of both neg_control & pos_control"
        )

    threshold_dict: Dict[str, Union[float, str]] = {}
    plate = file.stem
    threshold_dict["plate"] = plate

    # Compare as sets: unique() gives no ordering guarantee, so the original
    # list comparison could fail even when both 0.0 and 1.0 were present.
    if set(df[neg_control].unique()) != {0.0, 1.0}:
        warnings.warn("controls are missing. NaN value are computed for thresholds")
        nan_value = np.nan * np.arange(0, len(df[neg_control].values), 1)
        threshold_dict["fpr"] = np.nan
        threshold_dict["otsu"] = np.nan
        threshold_dict["nsigma"] = np.nan
        df["fpr"] = nan_value
        df["otsu"] = nan_value
        df["nsigma"] = nan_value

    else:
        pos_controls = df[df[pos_control] == 1][var_name].values
        # BUG FIX: the original also selected on `pos_control` here, so the
        # "negative" controls were actually the positive ones.
        neg_controls = df[df[neg_control] == 1][var_name].values

        if threshold_type == "fpr":
            threshold = custom_fpr.find_threshold(
                neg_controls, false_positive_rate=false_positive_rate
            )
            threshold_dict[threshold_type] = threshold
            df[threshold_type] = df.func.where(df[var_name] <= threshold, 0, 1)
        elif threshold_type == "otsu":
            combine_array = np.append(neg_controls, pos_controls, axis=0)
            threshold = otsu.find_threshold(
                combine_array, num_bins=num_bins, normalize_histogram=False
            )
            threshold_dict[threshold_type] = threshold
            df[threshold_type] = df.func.where(df[var_name] <= threshold, 0, 1)
        elif threshold_type == "nsigma":
            threshold = n_sigma.find_threshold(neg_controls, n=n)
            threshold_dict[threshold_type] = threshold
            df[threshold_type] = df.func.where(df[var_name] <= threshold, 0, 1)
        elif threshold_type == "all":
            fpr_thr = custom_fpr.find_threshold(
                neg_controls, false_positive_rate=false_positive_rate
            )
            combine_array = np.append(neg_controls, pos_controls, axis=0)

            if len(pos_controls) == 0:
                warnings.warn(
                    "controls are missing. NaN value are computed for otsu thresholds"
                )
                threshold_dict["otsu"] = np.nan
                df["otsu"] = np.nan * np.arange(0, len(df[var_name].values), 1)
            else:
                otsu_thr = otsu.find_threshold(
                    combine_array, num_bins=num_bins, normalize_histogram=False
                )
                threshold_dict["otsu"] = otsu_thr
                df["otsu"] = df.func.where(df[var_name] <= otsu_thr, 0, 1)

            nsigma_thr = n_sigma.find_threshold(neg_controls, n=n)
            threshold_dict["fpr"] = fpr_thr
            threshold_dict["nsigma"] = nsigma_thr
            df["fpr"] = df.func.where(df[var_name] <= fpr_thr, 0, 1)
            df["nsigma"] = df.func.where(df[var_name] <= nsigma_thr, 0, 1)

    outjson = pathlib.Path(out_dir).joinpath(f"{plate}_thresholds.json")
    with open(outjson, "w") as outfile:
        json.dump(threshold_dict, outfile)
    logger.info(f"Saving Thresholds in JSON fileformat {outjson}")

    if f"{out_format}" in [".feather", ".arrow"]:
        outname = pathlib.Path(out_dir).joinpath(f"{plate}_binary{out_format}")
        df.export_feather(outname)
        logger.info(f"Saving {plate}_binary{out_format}")
    elif f"{out_format}" == ".csv":
        outname = pathlib.Path(out_dir).joinpath(f"{plate}_binary{out_format}")
        df.export_csv(path=outname, chunk_size=chunk_size)
    else:
        outname = pathlib.Path(out_dir).joinpath(f"{plate}_binary{out_format}")
        df.export(outname, progress=True)
        logger.info(f"Saving {plate}_binary{out_format}")

    return
"""Tabular Thresholding."""
import statistics

import numpy


def find_threshold(
    values: numpy.ndarray,
    false_positive_rate: float,
) -> float:
    """Compute a threshold value using a user-specified false positive rate.

    We assume that `values` follow a single gaussian distribution.
    We estimate the mean and standard deviation of this distribution and
    compute a threshold such that the area to the right of the threshold is
    equal to the given `false_positive_rate`.

    Args:
        values: drawn from a single gaussian distribution.
        false_positive_rate: A user-defined tuning parameter, strictly
            between 0 and 1.

    Returns:
        The computed threshold value.

    Raises:
        ValueError: If `false_positive_rate` is not in the open interval (0, 1).
        statistics.StatisticsError: If `values` has zero variance, since a
            degenerate normal distribution has no inverse CDF.
    """
    if not (0 < false_positive_rate < 1):
        # Bug fix: error message previously read "mut be".
        raise ValueError(
            f"`false_positive_rate` must be in the range (0, 1). "
            f"Got {false_positive_rate:.2e} instead."
        )

    mu = float(numpy.mean(values))
    sigma = float(numpy.std(values))

    # The right-tail area above the threshold equals the requested FPR,
    # so the threshold is the (1 - FPR) quantile of the fitted normal.
    distribution = statistics.NormalDist(mu, sigma)
    threshold = distribution.inv_cdf(1 - false_positive_rate)

    return threshold
+ """ + # Get the image histogram + hist, bin_edges = numpy.histogram(values, bins=num_bins) + + # Get normalized histogram if it is required + if normalize_histogram: + hist = numpy.divide(hist.ravel(), hist.max(initial=0)) + + # Calculate centers of bins + bin_mids = (bin_edges[:-1] + bin_edges[1:]) / 2.0 + + # Iterate over all thresholds (indices) and get the probabilities w1(t), w2(t) + weight1 = numpy.cumsum(hist) + weight2 = numpy.cumsum(hist[::-1])[::-1] + + # Get the class means mu0(t) + mean1 = numpy.cumsum(hist * bin_mids) / weight1 + # Get the class means mu1(t) + mean2 = (numpy.cumsum((hist * bin_mids)[::-1]) / weight2[::-1])[::-1] + + inter_class_variance = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 + + # Maximize the inter_class_variance + index_of_max_var = numpy.argmax(inter_class_variance) + + threshold = bin_mids[:-1][index_of_max_var] + return float(threshold) diff --git a/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl b/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl new file mode 100644 index 0000000..012a87c --- /dev/null +++ b/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl @@ -0,0 +1,60 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + falsePositiverate: + inputBinding: + prefix: --falsePositiverate + type: double? + filePattern: + inputBinding: + prefix: --filePattern + type: string? + inpDir: + inputBinding: + prefix: --inpDir + type: Directory + n: + inputBinding: + prefix: --n + type: double? + negControl: + inputBinding: + prefix: --negControl + type: string + numBins: + inputBinding: + prefix: --numBins + type: double? + outDir: + inputBinding: + prefix: --outDir + type: Directory + outFormat: + inputBinding: + prefix: --outFormat + type: string + posControl: + inputBinding: + prefix: --posControl + type: string? 
"""Tabular Thresholding."""

import pathlib
import random
import shutil
import string
import tempfile

import filepattern as fp
import numpy as np
import pandas as pd
import pytest
import vaex
from polus.images.transforms.tabular.tabular_thresholding import (
    tabular_thresholding as tt,
)


class Generatedata:
    """Generate tabular data with several different file format."""

    def __init__(self, file_pattern: str, size: int, outname: str) -> None:
        """Define instance attributes."""
        self.dirpath = pathlib.Path.cwd()
        # Temp dirs are created under the CWD so clean_directories() can
        # find and remove them later by their "tmp" name prefix.
        self.inp_dir = tempfile.mkdtemp(dir=self.dirpath)
        self.out_dir = tempfile.mkdtemp(dir=self.dirpath)
        self.file_pattern = file_pattern
        self.size = size
        self.outname = outname
        self.x = self.create_dataframe()

    def get_inp_dir(self) -> pathlib.Path:
        """Get input directory."""
        return pathlib.Path(self.inp_dir)

    def get_out_dir(self) -> pathlib.Path:
        """Get output directory."""
        return pathlib.Path(self.out_dir)

    def create_dataframe(self) -> pd.core.frame.DataFrame:
        """Create Pandas dataframe.

        "MEAN" is the variable that gets thresholded; "neg_control" and
        "pos_neutral" hold random 0/1 flags used as the negative and
        positive control indicator columns.
        """
        diction_1 = {
            "A": list(range(self.size)),
            "B": [random.choice(string.ascii_letters) for i in range(self.size)],
            "C": np.random.randint(low=1, high=100, size=self.size),
            "D": np.random.normal(0.0, 1.0, size=self.size),
            "MEAN": np.linspace(1.0, 4000.0, self.size),
            "neg_control": [random.choice("01") for i in range(self.size)],
            "pos_neutral": [random.choice("01") for i in range(self.size)],
        }

        df = pd.DataFrame(diction_1)
        # random.choice("01") yields strings; the indicator columns must be int.
        df["neg_control"] = df["neg_control"].astype(int)
        df["pos_neutral"] = df["pos_neutral"].astype(int)

        return df

    def csv_func(self) -> None:
        """Convert pandas dataframe to csv file format."""
        self.x.to_csv(pathlib.Path(self.inp_dir, self.outname), index=False)

    def parquet_func(self) -> None:
        """Convert pandas dataframe to parquet file format."""
        self.x.to_parquet(
            pathlib.Path(self.inp_dir, self.outname),
            engine="auto",
            compression=None,
        )

    def feather_func(self) -> None:
        """Convert pandas dataframe to feather file format."""
        self.x.to_feather(pathlib.Path(self.inp_dir, self.outname))

    def arrow_func(self) -> None:
        """Convert pandas dataframe to Arrow file format."""
        # NOTE(review): written via to_feather; feather v2 is the Arrow IPC
        # file format, so the .arrow output should be Arrow-readable — confirm.
        self.x.to_feather(pathlib.Path(self.inp_dir, self.outname))

    def hdf_func(self) -> None:
        """Convert pandas dataframe to hdf5 file format."""
        v_df = vaex.from_pandas(self.x, copy_index=False)
        v_df.export(pathlib.Path(self.inp_dir, self.outname))

    def __call__(self) -> None:
        """To make a class callable."""
        # Dispatch on the requested file extension.
        data_ext = {
            ".hdf5": self.hdf_func,
            ".csv": self.csv_func,
            ".parquet": self.parquet_func,
            ".feather": self.feather_func,
            ".arrow": self.arrow_func,
        }

        return data_ext[self.file_pattern]()

    def clean_directories(self):
        """Remove files."""
        # Only remove the temp dirs this generator created under the CWD.
        for d in self.dirpath.iterdir():
            if d.is_dir() and d.name.startswith("tmp"):
                shutil.rmtree(d)


# Every tabular file format exercised by the round-trip test below.
EXT = [[".csv", ".feather", ".arrow", ".parquet", ".hdf5"]]


@pytest.fixture(params=EXT)
def poly(request):
    """To get the parameter of the fixture."""
    return request.param


def test_tabular_thresholding(poly):
    """Testing of merging of tabular data by rows with equal number of rows."""
    for i in poly:
        d = Generatedata(i, outname=f"data_1{i}", size=1000000)
        d()
        pattern = f".*{i}"
        fps = fp.FilePattern(d.get_inp_dir(), pattern)
        for file in fps():
            # Run all three threshold methods on the generated plate file.
            tt.thresholding_func(
                neg_control="neg_control",
                pos_control="pos_neutral",
                var_name="MEAN",
                threshold_type="all",
                false_positive_rate=0.01,
                num_bins=512,
                n=4,
                out_format=i,
                out_dir=d.get_out_dir(),
                file=file[1][0],
            )

            # A binary output in the same format must have been written.
            assert i in [f.suffix for f in d.get_out_dir().iterdir()]

            df = vaex.open(
                pathlib.Path(d.get_out_dir(), file[1][0].stem + "_binary" + i),
            )
            # All three method columns exist and contain only binary labels,
            # and the per-plate thresholds JSON was written alongside.
            threshold_methods = ["fpr", "otsu", "nsigma"]
            assert (all(item in list(df.columns) for item in threshold_methods)) is True
            assert np.allclose(np.unique(df[threshold_methods]), [0, 1]) is True
            assert file[1][0].stem + "_thresholds.json" in [
                f.name for f in d.get_out_dir().iterdir()
            ]

        d.clean_directories()
def get_grouping(
    inpDir: Path,
    pattern: str,
    groupBy: str,
    chunkSize: int,
) -> Tuple[str, int]:
    """Produce the best combination of variables for a given chunk size.

    Args:
        inpDir: Path to image files.
        pattern: Filepattern used to parse image file names.
        groupBy: Variable(s) used to group image filenames; when None the
            best single variable is selected automatically.
        chunkSize: Target number of images per collective filepattern.

    Returns:
        A tuple of (variables for grouping image filenames, image count).
    """
    fp = filepattern.FilePattern(inpDir, pattern)

    # Number of unique values observed for each filepattern variable.
    counts = {k: len(v) for k, v in fp.uniques.items()}

    # If groupBy was not supplied, pick the single variable whose count is as
    # large as possible without exceeding chunkSize; if none fits, fall back
    # to the first variable seen.
    best_count = 0
    best_group = None  # NOTE(review): remains None if `counts` is empty — confirm callers
    if groupBy is None:
        for k, v in counts.items():
            # Bug fix: this previously read `v < best_count`, which kept the
            # *smallest* fitting group; the combination search below uses
            # `v > best_count`, i.e. largest count not exceeding chunkSize.
            if v <= chunkSize and v > best_count:
                best_group, best_count = k, v
            elif best_count == 0:
                best_group, best_count = k, v
        groupBy = best_group

    # Check whether groupBy alone already reaches the chunk size.
    count = 1
    for v in groupBy:
        count *= counts[v]
    if count >= chunkSize:
        return groupBy, count
    best_group, best_count = groupBy, count

    # Search combinations of the remaining variables for a count as close to
    # chunkSize as possible without going over.
    variables = [v for v in fp.variables if v not in groupBy]
    for i in range(len(variables)):
        groups = {best_group: best_count}
        for p in combinations(variables, i):
            group = groupBy + "".join("".join(c) for c in p)
            count = 1
            for v in group:
                count *= counts[v]
            groups[group] = count

        # If all candidate groups exceed the chunk size, keep the best so far.
        if all(v > chunkSize for k, v in groups.items()):
            return best_group, best_count

        # Otherwise keep the largest group that still fits within chunkSize.
        for k, v in groups.items():
            if v > chunkSize:
                continue
            if v > best_count:
                best_group, best_count = k, v
    return best_group, best_count
data["filePatterns"].append(key) + json.dump(data, cwlout) + + return + + +def main( + inpDir: Path, + pattern: str, + chunkSize: int, + groupBy: str, + outDir: Path, +): + + starttime = time.time() + + # If the pattern isn't given, try to infer one + if pattern is None: + try: + pattern = filepattern.infer_pattern([f.name for f in inpDir.iterdir()]) + except ValueError: + logger.error( + "Could not infer a filepattern from the input files, " + + "and no filepattern was provided." + ) + raise + + assert inpDir.exists(), logger.info("Input directory does not exist") + + logger.info("Finding best grouping...") + groupBy, count = get_grouping(inpDir, pattern, groupBy, chunkSize) + + logger.info("Generating filepatterns...") + fp = filepattern.FilePattern(inpDir, pattern) + fps, counts = [], [] + for files in fp(group_by=groupBy): + fps.append(filepattern.infer_pattern([f["file"].name for f in files])) + fp_temp = filepattern.FilePattern(inpDir, fps[-1]) + counts.append(sum(len(f) for f in fp_temp)) + + assert sum(counts) == len([f for f in fp]) + + save_generator_outputs(dict(zip(fps, counts)), outDir) + + endtime = (time.time() - starttime) / 60 + logger.info(f"Total time taken to process all images: {endtime}") + + +if __name__ == "__main__": + + # Import environment variables + POLUS_LOG = getattr(logging, os.environ.get("POLUS_LOG", "INFO")) + + # Initialize the logger + logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", + ) + logger = logging.getLogger("main") + logger.setLevel(POLUS_LOG) + + # Argument parsing + logger.info("Parsing arguments...") + parser = argparse.ArgumentParser( + prog="main", description="Filepattern generator Plugin" + ) + # Input arguments + parser.add_argument( + "--inpDir", + dest="inpDir", + type=str, + help="Input image collection to be processed by this plugin", + required=True, + ) + parser.add_argument( + "--pattern", + dest="pattern", + type=str, + 
help="Filepattern regex used to parse image files", + required=False, + ) + parser.add_argument( + "--chunkSize", + dest="chunkSize", + type=int, + default=30, + help="Select chunksize for generating Filepattern from collective image set", + required=False, + ) + parser.add_argument( + "--groupBy", + dest="groupBy", + type=str, + help="Select a parameter to generate Filepatterns in specific order", + required=False, + ) + parser.add_argument( + "--outDir", dest="outDir", type=str, help="Output collection", required=True + ) + + # Parse the arguments + args = parser.parse_args() + inpDir = Path(args.inpDir) + + if inpDir.joinpath("images").is_dir(): + inpDir = inpDir.joinpath("images").absolute() + logger.info("inputDir = {}".format(inpDir)) + outDir = Path(args.outDir) + logger.info("outDir = {}".format(outDir)) + pattern = args.pattern + logger.info("pattern = {}".format(pattern)) + chunkSize = args.chunkSize + logger.info("chunkSize = {}".format(chunkSize)) + groupBy = args.groupBy + logger.info("groupBy = {}".format(groupBy)) + + main( + inpDir=inpDir, + pattern=pattern, + chunkSize=chunkSize, + groupBy=groupBy, + outDir=outDir, + ) diff --git a/utils/filepattern-generator-plugin/src/requirements.txt b/utils/filepattern-generator-plugin/src/requirements.txt new file mode 100644 index 0000000..aae7cb2 --- /dev/null +++ b/utils/filepattern-generator-plugin/src/requirements.txt @@ -0,0 +1 @@ +filepattern==1.4.7 diff --git a/utils/filepattern-generator-plugin/tests/test_main.py b/utils/filepattern-generator-plugin/tests/test_main.py new file mode 100644 index 0000000..caeae1f --- /dev/null +++ b/utils/filepattern-generator-plugin/tests/test_main.py @@ -0,0 +1,43 @@ +from pathlib import Path +import os, sys + +dirpath = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(os.path.join(dirpath, "../")) +import unittest +from src.main import * +import json + +inpDir = Path(dirpath).parent.joinpath("images") +outDir = Path(dirpath).parent.joinpath("out") +pattern 
= "p0{r}_x{x+}_y{y+}_wx{t}_wy{p}_c{c}.ome.tif" +chunkSize = 9 +filename = "pattern_generator" +data = { + "p00_x01_y{rr}_wx0_wy0_c{t}.ome.tif": 30, + "p00_x01_y{rr}_wx0_wy1_c{t}.ome.tif": 30, + "p00_x01_y{rr}_wx0_wy2_c{t}.ome.tif": 30, +} + + +class Test_Filepattern_Generator(unittest.TestCase): + def setUp(self) -> None: + + self.inpDir = inpDir + self.pattern = pattern + self.chunkSize = chunkSize + self.filename = filename + self.outDir = outDir + self.data = data + + def test_generated_json_output(self): + save_generator_outputs(self.data, outDir) + with open(outDir.joinpath("file_patterns.json"), "r") as read_file: + data = json.load(read_file) + file_pattern = data["filePatterns"] + self.assertTrue(file_pattern[0] == "p00_x01_y{rr}_wx0_wy0_c{t}.ome.tif") + self.assertTrue(file_pattern[1] == "p00_x01_y{rr}_wx0_wy1_c{t}.ome.tif") + self.assertTrue(file_pattern[2] == "p00_x01_y{rr}_wx0_wy2_c{t}.ome.tif") + + +if __name__ == "__main__": + unittest.main() diff --git a/utils/filepattern-generator-plugin/tests/version_test.py b/utils/filepattern-generator-plugin/tests/version_test.py new file mode 100644 index 0000000..25f0ea2 --- /dev/null +++ b/utils/filepattern-generator-plugin/tests/version_test.py @@ -0,0 +1,46 @@ +import unittest, json +from pathlib import Path +import urllib.request as request + + +class VersionTest(unittest.TestCase): + """Verify VERSION is correct""" + + version_path = Path(__file__).parent.parent.joinpath("VERSION") + json_path = Path(__file__).parent.parent.joinpath("plugin.json") + url = "https://hub.docker.com/repository/docker/polusai/filepattern-generator-plugin/tags?page=1&ordering=last_updated" + + def test_plugin_manifest(self): + """Tests VERSION matches the version in the plugin manifest""" + + # Get the plugin version + with open(self.version_path, "r") as file: + version = file.readline() + + # Load the plugin manifest + with open(self.json_path, "r") as file: + plugin_json = json.load(file) + + 
self.assertEqual(plugin_json["version"], version) + self.assertTrue(plugin_json["containerId"].endswith(version)) + + def test_docker_hub(self): + """Tests VERSION matches the latest docker container tag""" + + # Get the plugin version + with open(self.version_path, "r") as file: + version = file.readline() + + response = json.load(request.urlopen(self.url)) + if len(response["results"]) == 0: + self.fail( + "Could not find repository or no containers are in the repository." + ) + latest_tag = json.load(response)["results"][0]["name"] + + self.assertEqual(latest_tag, version) + + +if __name__ == "__main__": + + unittest.main() diff --git a/utils/polus-csv-collection-merger/Dockerfile b/utils/polus-csv-collection-merger/Dockerfile new file mode 100644 index 0000000..9137b85 --- /dev/null +++ b/utils/polus-csv-collection-merger/Dockerfile @@ -0,0 +1,4 @@ +FROM alpine +COPY VERSION / +COPY script.sh script.sh +ENTRYPOINT ["sh", "script.sh"] \ No newline at end of file diff --git a/utils/polus-csv-collection-merger/README.md b/utils/polus-csv-collection-merger/README.md new file mode 100644 index 0000000..ea885e5 --- /dev/null +++ b/utils/polus-csv-collection-merger/README.md @@ -0,0 +1,45 @@ +# Polus CSV Collection Merger Plugin + +This plugin helps to merge multiple CSV Collections in WIPP into one collection for later analysis. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). 
+ +**This plugin is in development and is subject for change** + +## Options + +This plugin takes four input parameters and one output parameter: + +| Name | Description | I/O | WIPP Type | +|----------------------|------------------------------------------------|--------|---------------| +| `input-collection-a` | Input CSV collection A | Input | csvCollection | +| `input-collection-b` | Input CSV collection B | Input | csvCollection | +| `append-a` | Option to append collection ID to files from A | Input | boolean | +| `append-b` | Option to append collection ID to files from B | Input | boolean | +| `output` | Output CSV collection | Output | csvCollection | + +## Build the plugin + +```bash +docker build . -t labshare/polus-csv-collection-merger:0.1.1 +``` + + +## Run the plugin + +### Manually + +To test, create 3 folders: `` and `` should contain csv collections you would like to merge. `` is the target folder which will contain the merged files. + +Run the docker container +```bash +docker run -v :/a \ + -v :/b \ + -v :/c \ + labshare/polus-csv-collection-merger:0.1.1 \ + --input-collection-a /a \ + --input-collection-b /b \ + --append-a 'true' \ + --append-b 'true' \ + --output /c +``` \ No newline at end of file diff --git a/utils/polus-csv-collection-merger/VERSION b/utils/polus-csv-collection-merger/VERSION new file mode 100644 index 0000000..6da28dd --- /dev/null +++ b/utils/polus-csv-collection-merger/VERSION @@ -0,0 +1 @@ +0.1.1 \ No newline at end of file diff --git a/utils/polus-csv-collection-merger/csvcollectionsmerger.cwl b/utils/polus-csv-collection-merger/csvcollectionsmerger.cwl new file mode 100644 index 0000000..fb0684d --- /dev/null +++ b/utils/polus-csv-collection-merger/csvcollectionsmerger.cwl @@ -0,0 +1,28 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + append-a: + inputBinding: + prefix: --append-a + type: boolean? + append-b: + inputBinding: + prefix: --append-b + type: boolean? 
+ input-collection-a: + inputBinding: + prefix: --input-collection-a + type: Directory + input-collection-b: + inputBinding: + prefix: --input-collection-b + type: Directory + output: + inputBinding: + prefix: --output + type: Directory +outputs: + output: !!python/name:builtins.NotImplementedError '' +requirements: + DockerRequirement: + dockerPull: polusai/csv-collection-merger:0.1.2 diff --git a/utils/polus-csv-collection-merger/ict.yaml b/utils/polus-csv-collection-merger/ict.yaml new file mode 100644 index 0000000..b6b2e70 --- /dev/null +++ b/utils/polus-csv-collection-merger/ict.yaml @@ -0,0 +1,61 @@ +author: +- Konstantin taletskiy +contact: konstantin.taletskiy@labshare.org +container: polusai/csv-collection-merger:0.1.2 +description: Merge two csv collections. You have an option to prepend collection name + to avoid name conflicts. +entrypoint: '[python3, main.py]' +inputs: +- description: Input csv collection A. + format: + - input-collection-a + name: input-collection-a + required: true + type: path +- description: Append collection name to collection A. + format: + - append-a + name: append-a + required: false + type: boolean +- description: Input csv collection B. + format: + - input-collection-b + name: input-collection-b + required: true + type: path +- description: Append collection name to collection B. + format: + - append-b + name: append-b + required: false + type: boolean +name: polusai/CSVcollectionsmerger +outputs: +- description: Output csv collection for the plugin + format: + - output + name: output + required: true + type: path +repository: https://github.com/polusai/image-tools +specVersion: 1.0.0 +title: CSV collections merger +ui: +- description: Pick a collection... + key: inputs.input-collection-a + title: 'CSV Collection A: ' + type: path +- description: Pick an option... + key: inputs.append-a + title: 'Append collection name to filenames in A: ' + type: checkbox +- description: Pick a collection... 
+ key: inputs.input-collection-b + title: 'CSV Collection B: ' + type: path +- description: Pick an option... + key: inputs.append-b + title: 'Append collection name to filenames in B: ' + type: checkbox +version: 0.1.2 diff --git a/utils/polus-csv-collection-merger/plugin.json b/utils/polus-csv-collection-merger/plugin.json new file mode 100644 index 0000000..d777c0c --- /dev/null +++ b/utils/polus-csv-collection-merger/plugin.json @@ -0,0 +1,61 @@ +{ + "name": "CSV collections merger", + "version": "0.1.2", + "title": "CSV collections merger", + "description": "Merge two csv collections. You have an option to prepend collection name to avoid name conflicts.", + "author": "Konstantin taletskiy (konstantin.taletskiy@labshare.org)", + "containerId": "polusai/csv-collection-merger:0.1.2", + "inputs": [ + { + "name": "input-collection-a", + "type": "csvCollection", + "description": "Input csv collection A." + }, + { + "name": "append-a", + "type": "boolean", + "required": "false", + "description": "Append collection name to collection A." + }, + { + "name": "input-collection-b", + "type": "csvCollection", + "description": "Input csv collection B." + }, + { + "name": "append-b", + "type": "boolean", + "required": "false", + "description": "Append collection name to collection B." + } + ], + "outputs": [ + { + "name": "output", + "type": "csvCollection", + "description": "Output csv collection for the plugin" + } + ], + "ui": [ + { + "key": "inputs.input-collection-a", + "title": "CSV Collection A: ", + "description": "Pick a collection..." + }, + { + "key": "inputs.append-a", + "title": "Append collection name to filenames in A: ", + "description": "Pick an option..." + }, + { + "key": "inputs.input-collection-b", + "title": "CSV Collection B: ", + "description": "Pick a collection..." + }, + { + "key": "inputs.append-b", + "title": "Append collection name to filenames in B: ", + "description": "Pick an option..." 
+ } + ] +} \ No newline at end of file diff --git a/utils/polus-csv-collection-merger/script.sh b/utils/polus-csv-collection-merger/script.sh new file mode 100644 index 0000000..646306d --- /dev/null +++ b/utils/polus-csv-collection-merger/script.sh @@ -0,0 +1,61 @@ +#!/bin/sh + +while [[ $# -gt 0 ]] +do +key="$1" + +case $key in + --input-collection-a) + INPUT_A="$2" + shift # past argument + shift # past value + ;; + --input-collection-b) + INPUT_B="$2" + shift # past argument + shift # past value + ;; + --append-a) + APPEND_A="$2" + shift # past argument + shift # past value + ;; + --append-b) + APPEND_B="$2" + shift # past argument + shift # past value + ;; + --output) + OUTPUT="$2" + shift # past argument + shift # past value + ;; +esac +done + +echo "INPUT COLLECTION A = ${INPUT_A}" +echo "INPUT COLLECTION B = ${INPUT_B}" +echo "APPEND A = ${APPEND_A}" +echo "APPEND B = ${APPEND_B}" +echo "OUTPUT = ${OUTPUT}" + +COLLECTION_A="$(basename $INPUT_A)" +COLLECTION_B="$(basename $INPUT_B)" +echo " " + +echo "Copying files from collection A ($COLLECTION_A):" +for f in $INPUT_A/*; do echo "$(basename $f)"; done +if [ "$APPEND_A" = "true" ]; then + for f in $INPUT_A/*; do cp "$f" "$OUTPUT"/"$COLLECTION_A"_"$(basename $f)"; done +else + for f in $INPUT_A/*; do cp "$f" "$OUTPUT"/"$(basename $f)"; done +fi +echo " " + +echo "Copying files from collection B ($COLLECTION_B):" +for f in $INPUT_B/*; do echo "$(basename $f)"; done +if [ "$APPEND_B" = "true" ]; then + for f in $INPUT_B/*; do cp "$f" "$OUTPUT"/"$COLLECTION_B"_"$(basename $f)"; done +else + for f in $INPUT_B/*; do cp "$f" "$OUTPUT"/"$(basename $f)"; done +fi \ No newline at end of file diff --git a/utils/polus-python-template/.bumpversion.cfg b/utils/polus-python-template/.bumpversion.cfg new file mode 100644 index 0000000..cdd5e56 --- /dev/null +++ b/utils/polus-python-template/.bumpversion.cfg @@ -0,0 +1,23 @@ +[bumpversion] +current_version = 1.1.0-dev0 +commit = False +tag = False +parse = 
(?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? +serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:README.md] diff --git a/utils/polus-python-template/.gitignore b/utils/polus-python-template/.gitignore new file mode 100644 index 0000000..d27abdc --- /dev/null +++ b/utils/polus-python-template/.gitignore @@ -0,0 +1 @@ +poetry.lock \ No newline at end of file diff --git a/utils/polus-python-template/CHANGELOG.md b/utils/polus-python-template/CHANGELOG.md new file mode 100644 index 0000000..489dc11 --- /dev/null +++ b/utils/polus-python-template/CHANGELOG.md @@ -0,0 +1,9 @@ +# CHANGELOG + +# 1.0.0 + +* Generate plugins from templates using cookiecutter. + +# 1.1.0 + +* Generate plugins following updated [standard guidelines](https://labshare.atlassian.net/wiki/spaces/WIPP/pages/3275980801/Python+Plugin+Standards) diff --git a/utils/polus-python-template/README.md b/utils/polus-python-template/README.md new file mode 100644 index 0000000..0218683 --- /dev/null +++ b/utils/polus-python-template/README.md @@ -0,0 +1,112 @@ +# WIPP Plugin Cookie Cutter (for Python) (1.1.0-dev0) + +This repository is a cookie cutter template that creates the basic scaffold structure of a +polus plugin and add it to the polus plugins directory structure. + +## How to use +1. Clone `polus-plugins` and change to the polus-plugins directory +2. `cd /utils/polus-python-template/` +3. (optional) Install poetry if not available. +4. (optional) Create a dedicated environment with conda or venv. +5. Install the dependencies: `poetry install` +6. Ignore changes to `cookiecutter.json` using: `git update-index --assume-unchanged cookiecutter.json` +7. 
Modify `cookiecutter.json` to include author and plugin information. `plugin_package` should always start with `polus.plugins`. +**NOTE:** Do not edit values in brackets ({}) as they are edited by cookiecutter directly. +Those are automatically generated from the previous entries. If your plugin is called +"Awesome Function", then the plugin folder and docker container will have the name `awesome-function-plugin`. +8. Create your plugin skeleton: ` python -m cookiecutter . --no-input` + + +## Plugin Standard +The generated plugin will be compatible with polus' most up-to-date guidelines: +see [standard guidelines](https://labshare.atlassian.net/wiki/spaces/WIPP/pages/3275980801/Python+Plugin+Standards) + +The code generated provides out-of-the-box support for: + - customizing the plugin code. + - implementing tests. + - creating and running a container. + - managing versioning. + - updating documentation (README, CHANGELOG). + - maintaining a WIPP manifest (plugin.json). + + +## Executing the plugin + +The plugin should be run as a package. +To install the package: + +`pip install .` + +The skeleton code can be run this way: +From the plugin's top directory (with the default values): + +`python -m polus.plugins.package1.package2.awesome_function -i /tmp/inp -o /tmp/out` + +This should print some logs with the provided inputs and outputs and return. + +## Running tests +Plugin developers should use `pytest`. +Some simple tests have been added to the template as examples. +Before submitting a PR to `polus-plugins`, other unit tests should be created and added to the `tests` +directory. + +To run tests: + +From the plugin's top directory, type `python -m pytest`. +Depending on how you have set up your environment, you may be able to run the pytest CLI directly with `pytest`. See the pytest documentation for how the project source directory is scanned to collect tests. +This should run a test successfully and return. 
+ + +## Creating and running a container + +` ./build-docker.sh && ./run-plugin.sh` + +Build the docker image and run the container. + +### DockerFile +A docker image is build from a base image with common dependencies pre-installed. +The image entrypoint will run the plugin's package entrypoint. + +### build-docker.sh +Run this script to build the container. + +### run-plugin.sh +Run the container locally. + + +## Customize the plugin + +### Project code + +A set of common dependencies are added to `pyproject.toml`. +Update according to your needs. + +### Managing versioning + +Making sure that the file version is consistent across files in a plugin can be +challenging, so the Python template now uses +[bump2version](https://github.com/c4urself/bump2version) +to help manage versioning. This automatically changes the `VERSION` and +`plugin.json` files to the next version, preventing you from having to remember +to change the version everywhere. The `bumpversion.cfg` can be modified to +change the version in other files as well. + +To use this feature: +`bump2version --config-file bumpversion.cfg` + +### Documentation + +#### README.md + +A basic description of what the plugin does. This should define all the inputs +and outputs. + +#### CHANGELOG.md + +Documents updates made to the plugin. + + +### WIPP manifest (plugin.json). + +This file defines the input and output variables for WIPP, and defines the UI +components showed to the user. 
diff --git a/utils/polus-python-template/VERSION b/utils/polus-python-template/VERSION new file mode 100644 index 0000000..3018fdc --- /dev/null +++ b/utils/polus-python-template/VERSION @@ -0,0 +1 @@ +1.1.0-dev0 diff --git a/utils/polus-python-template/cookiecutter.json b/utils/polus-python-template/cookiecutter.json new file mode 100644 index 0000000..030f8cf --- /dev/null +++ b/utils/polus-python-template/cookiecutter.json @@ -0,0 +1,16 @@ +{ + "author": "Data Scientist", + "author_email": "data.scientist@labshare.org", + "plugin_name": "Awesome Plugin", + "plugin_package": "polus.plugins.package1.package2.awesome_function", + "plugin_description": "An awesome function.", + "plugin_version": "0.1.0", + + "package_folders": "{%set folders = cookiecutter.plugin_package.replace('.', '/') %}{{folders}}", + "package_name": "{% set packages = cookiecutter.plugin_package.split('.') %}{{ packages | last }}", + "project_name": "{% set project_name = cookiecutter.plugin_package.replace('_', '-').replace('.', '-') %}{{ project_name }}", + "plugin_slug": "{% set plugin_slug = cookiecutter.package_name.replace('_', '-') %}polus-{{plugin_slug}}-plugin", + "container_name": "{% set container_name = ('-').join(cookiecutter.plugin_slug.split('-')[1:])%}{{ container_name }}", + "container_id": "polusai/{{cookiecutter.container_name}}", + "container_version": "{{cookiecutter.plugin_version}}" +} diff --git a/utils/polus-python-template/hooks/post_gen_project.py b/utils/polus-python-template/hooks/post_gen_project.py new file mode 100644 index 0000000..f3f0ee4 --- /dev/null +++ b/utils/polus-python-template/hooks/post_gen_project.py @@ -0,0 +1,63 @@ +import os +import shutil +from pathlib import Path +import logging +from os import environ + +logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", +) +POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "DEBUG")) +logger = 
logging.getLogger("polus-python-template-post") +logger.setLevel(POLUS_LOG) + + +def create_repository_directories(source_dir): + """ Build the correct directories inside polus-plugins. + The directory structure must conform to the plugin's spec : + - dash-separated word in identifier. + - folder hierarchy matches package namespace minus "polus.plugins" + - plugin's folder name reflects the plugin package name but ends with "-plugin" + Ex: polus.plugins.package1.package2.awesome_function becomes + package1/package2/awesome-function-plugin + """ + + # try to find the project's root, otherwise we stay in the + # staging directory + final_dir = source_dir.parent + for folder in Path(final_dir).parent.parents: + if os.path.exists(folder / ".git"): + final_dir = folder + break + + # by default we create a plugin directory at the root + target_dir = final_dir + + # figure out if additional directories need to be created at the root + # make sure we replace underscores + new_dirs = "{{cookiecutter.plugin_package}}".replace("_", "-") + new_dirs = new_dirs.split(".") + # remove polus.plugins so we only keep intermediary directories + # Ex: polus.plugins.package1.package2.awesome_function creates + # package1/package2/ + new_dirs = new_dirs[2:-1] + if len(new_dirs) != 0: + package_dir = os.path.join(*new_dirs) + target_dir = final_dir / package_dir + + # create the plugin directory + os.makedirs(target_dir, exist_ok=True) + + return target_dir + + +def move_project_source_to_final_location(): + """Move staged files to the final target repo.""" + source_dir = Path(os.getcwd()) + target_dir = create_repository_directories(source_dir) + logger.debug(f"moving sources from {source_dir} to {target_dir}") + shutil.move(source_dir, target_dir) + +# NOTE do not create folder structure with the repo at the moment. 
+# move_project_source_to_final_location() \ No newline at end of file diff --git a/utils/polus-python-template/hooks/pre_gen_project.py b/utils/polus-python-template/hooks/pre_gen_project.py new file mode 100644 index 0000000..802f5d1 --- /dev/null +++ b/utils/polus-python-template/hooks/pre_gen_project.py @@ -0,0 +1,55 @@ +""" +Validate template variables before templating the project +""" +import logging +from os import environ + +logging.basicConfig( + format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", + datefmt="%d-%b-%y %H:%M:%S", +) +POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "DEBUG")) +logger = logging.getLogger("polus-python-template-pre") +logger.setLevel(POLUS_LOG) + +# NOTE These validations could be performed on a plugin.json +# using polus plugins pydantic models. + +author = "{{ cookiecutter.author }}" +# TODO check valid + +author_email = "{{ cookiecutter.author_email }}" +# TODO check valid + +plugin_package = "{{ cookiecutter.plugin_package }}" +if not plugin_package.startswith("polus.plugins."): + raise ValueError( + f"plugin package must be a child of polus.plugins." + + f"plugin_package must start with 'polus.plugins'. Got : {plugin_package}" + ) +if plugin_package.endswith("_plugin"): + raise ValueError( + f"plugin_package must not end with _plugin. Got : {plugin_package}" + ) + +# TODO check we have a valid python package name + +plugin_version = "{{ cookiecutter.plugin_version }}" +# TODO check version is valid + +project_name = "{{ cookiecutter.project_name }}" +assert not ("_" in project_name) and not ("." 
in project_name) + +plugin_slug = "{{ cookiecutter.plugin_slug }}" +assert plugin_slug.startswith("polus-") and plugin_slug.endswith("-plugin") + +container_name = "{{ cookiecutter.container_name }}" +assert container_name.endswith("-plugin") + +container_id = "{{ cookiecutter.container_id }}" +assert container_id.startswith("polusai/") + +container_version = "{{ cookiecutter.container_version }}" +assert container_version == plugin_version + +logger.debug(f"plugin_package: {plugin_package}" ) diff --git a/utils/polus-python-template/pyproject.toml b/utils/polus-python-template/pyproject.toml new file mode 100644 index 0000000..0052037 --- /dev/null +++ b/utils/polus-python-template/pyproject.toml @@ -0,0 +1,32 @@ +[tool.poetry] +name = "polus-python-template" +version = "1.1.0-dev0" +description = "" +authors = ["Nick Schaub ", "Antoine Gerardin "] +readme = "README.md" +packages = [{include = "polus_python_template"}] + + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" + +[tool.poetry.group.dev.dependencies] +cookiecutter = "1.7.2" +jinja2_ospath = "0.3.0" +bump2version = "^1.0.1" +pytest = "^7.4" +pytest-sugar = "^0.9.6" +pre-commit = "^3.2.1" +black = "^23.3.0" +mypy = "^1.1.1" +ruff = "^0.0.270" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + + +[tool.pytest.ini_options] +pythonpath = [ + "." +] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg b/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg new file mode 100644 index 0000000..ae20e5c --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg @@ -0,0 +1,29 @@ +[bumpversion] +current_version = {{ cookiecutter.plugin_version }} +commit = False +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:VERSION] + +[bumpversion:file:README.md] + +[bumpversion:file:plugin.json] + +[bumpversion:file:src/{{cookiecutter.package_folders}}/__init__.py] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore b/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore new file mode 100644 index 0000000..7c603f8 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore @@ -0,0 +1,4 @@ +.venv +out +tests +__pycache__ diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore b/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore new file mode 100644 index 0000000..c04bc49 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore @@ -0,0 +1 @@ +poetry.lock diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md b/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md new file mode 100644 index 0000000..ca292da --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md @@ -0,0 +1,5 @@ +# CHANGELOG + +## {{cookiecutter.container_version}} + +Initial release. 
diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile b/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile new file mode 100644 index 0000000..dc889b0 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile @@ -0,0 +1,26 @@ +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +# ENV EXEC_DIR="/opt/executables" +# ENV DATA_DIR="/data" +# ENV POLUS_EXT=".ome.tif" +# Change to WARNING for fewer logs, and DEBUG for debugging +ENV POLUS_LOG="INFO" + +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" + +# Work directory defined in the base container +# WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +COPY CHANGELOG.md ${EXEC_DIR} +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +# Default command. Additional arguments are provided through the command line +ENTRYPOINT ["python3", "-m", "{{cookiecutter.plugin_package}}"] +CMD ["--help"] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/README.md b/utils/polus-python-template/{{cookiecutter.container_name}}/README.md new file mode 100644 index 0000000..f99b4a8 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/README.md @@ -0,0 +1,23 @@ +# {{cookiecutter.plugin_name}} ({{cookiecutter.plugin_version}}) + +{{cookiecutter.plugin_description}} + +## Building + +To build the Docker image for the conversion plugin, run `./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the +contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes 2 input arguments and 1 output argument: + +| Name | Description | I/O | Type | Default +|---------------|-------------------------|--------|--------| +| inpDir | Input image collection to be processed by this plugin | Input | collection +| filePattern | Filename pattern used to separate data | Input | string | .* +| preview | Generate an output preview | Input | boolean | False +| outDir | Output collection | Output | collection diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION b/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION new file mode 100644 index 0000000..6c21993 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION @@ -0,0 +1 @@ +{{ cookiecutter.plugin_version }} diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh b/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh new file mode 100755 index 0000000..cf00ccc --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$("] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +bfio = {version = ">=2.3.3,<3.0", extras = ["all"]} +filepattern = ">=2.0.4,<3.0" +preadator = "^0.4.0.dev2" +typer = "^0.7.0" + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pytest = "^7.4" +pytest-sugar = "^0.9.6" +pre-commit = "^3.2.1" +black = "^23.3.0" +mypy = "^1.1.1" +ruff = "^0.0.270" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" + +[tool.pytest.ini_options] +pythonpath = [ + "." 
+] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh b/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh new file mode 100755 index 0000000..d979d07 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +version=$( None: + """Generate preview of the plugin outputs.""" + + preview = {} + + with Path.open(out_dir / "preview.json", "w") as fw: + json.dump(preview, fw, indent=2) + +@app.command() +def main( + inp_dir: Path = typer.Option( + ..., + "--inpDir", + "-i", + help="Input directory to be processed.", + exists=True, + readable=True, + file_okay=False, + resolve_path=True, + ), + filepattern: str = typer.Option( + ".*", + "--filePattern", + "-f", + help="Filepattern used to filter inputs.", + ), + out_dir: Path = typer.Option( + ..., + "--outDir", + "-o", + help="Output directory.", + exists=True, + writable=True, + file_okay=False, + resolve_path=True, + ), + preview: bool = typer.Option( + False, + "--preview", + "-v", + help="Preview of expected outputs (dry-run)", + show_default=False, + ), +): + """{{cookiecutter.plugin_name}}.""" + logger.info(f"inpDir: {inp_dir}") + logger.info(f"filePattern: {filepattern}") + logger.info(f"outDir: {out_dir}") + + if preview: + generate_preview(inp_dir, out_dir) + logger.info(f"generating preview data in : {out_dir}.") + return + + {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) + + +if __name__ == "__main__": + app() diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py b/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py new file mode 100644 index 0000000..2573a72 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name 
}}.py @@ -0,0 +1,16 @@ +"""{{ cookiecutter.plugin_name }}.""" + +from pathlib import Path + + +def {{cookiecutter.package_name}}(inp_dir: Path, filepattern: str, out_dir: Path): + """{{cookiecutter.plugin_name}}. + + Args: + inp_dir: input directory to process + filepattern: filepattern to filter inputs + out_dir: output directory + Returns: + None + """ + pass \ No newline at end of file diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py new file mode 100644 index 0000000..28371ef --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py @@ -0,0 +1 @@ +"""Tests for {{cookiecutter.package_name}}.""" diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py new file mode 100644 index 0000000..fd0c321 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py @@ -0,0 +1,147 @@ +"""Test fixtures. + +Set up all data used in tests. 
+""" +import tempfile +import shutil +from pathlib import Path +import numpy as np +import pytest +import itertools + +from bfio import BioWriter, BioReader + +def pytest_addoption(parser: pytest.Parser) -> None: + """Add options to pytest.""" + parser.addoption( + "--downloads", + action="store_true", + dest="downloads", + default=False, + help="run tests that download large data files", + ) + parser.addoption( + "--slow", + action="store_true", + dest="slow", + default=False, + help="run slow tests", + ) + + + + +IMAGE_SIZES = [(1024 * (2**i) ,1024 * (2**i)) for i in range(1, 2)] +LARGE_IMAGE_SIZES = [(1024 * (2**i) ,1024 * (2**i)) for i in range(4, 5)] +PIXEL_TYPES = [np.uint8, float] +PARAMS = [ + (image_size, pixel_type) + for image_size, pixel_type in itertools.product( + IMAGE_SIZES, PIXEL_TYPES + ) +] +LARGE_DATASET_PARAMS = [ + (image_size, pixel_type) + for image_size, pixel_type in itertools.product( + LARGE_IMAGE_SIZES, PIXEL_TYPES + ) +] + + +FixtureReturnType = tuple[ + Path, # input dir + Path, # output dir + Path, # ground truth path + Path, # input image path + Path, # ground truth path +] + + +@pytest.fixture(params=PARAMS) +def generate_test_data(request: pytest.FixtureRequest) -> FixtureReturnType: + """Generate staging temporary directories with test data and ground truth.""" + + # collect test params + image_size, pixel_type = request.param + test_data = _generate_test_data(image_size, pixel_type) + print(test_data) + yield from test_data + + +@pytest.fixture(params=LARGE_DATASET_PARAMS) +def generate_large_test_data(request: pytest.FixtureRequest) -> FixtureReturnType: + """Generate staging temporary directories with test data and ground truth.""" + + # collect test params + image_size, pixel_type = request.param + test_data =_generate_test_data(image_size, pixel_type) + + print(test_data) + + yield from test_data + + +def _generate_test_data(image_size : tuple[int,int], pixel_type: int) -> FixtureReturnType: + """Generate staging temporary 
directories with test data and ground truth.""" + + image_x, image_y = image_size + + # staging area + data_dir = Path(tempfile.mkdtemp(suffix="_data_dir")) + inp_dir = data_dir.joinpath("inp_dir") + inp_dir.mkdir(exist_ok=True) + out_dir = data_dir.joinpath("out_dir") + out_dir.mkdir(exist_ok=True) + ground_truth_dir = data_dir.joinpath("ground_truth_dir") + ground_truth_dir.mkdir(exist_ok=True) + + # generate image and ground_truth + img_path = inp_dir.joinpath("img.ome.tif") + image = gen_2D_image(img_path, image_x, image_y, pixel_type) + ground_truth_path = ground_truth_dir.joinpath("ground_truth.ome.tif") + gen_ground_truth(img_path, ground_truth_path) + + yield inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path + + shutil.rmtree(data_dir) + +def gen_2D_image( + img_path, + image_x, + image_y, + pixel_type +) : + """Generate a random 2D square image.""" + + if np.issubdtype(pixel_type, np.floating) : + rng = np.random.default_rng() + image = rng.uniform(0.0, 1.0, + size=(image_y, image_x) + ).astype(pixel_type) + else: + image = np.random.randint(0, 255, size=(image_y, image_x)) + + with BioWriter(img_path) as writer: + (y, x) = image.shape + writer.Y = y + writer.X = x + writer.Z = 1 + writer.C = 1 + writer.T = 1 + writer.dtype = image.dtype + writer[:] = image[:] + + return image + + +def gen_ground_truth(img_path : Path, ground_truth_path : Path): + """generate some ground truth from the image data. + Here we generate a simple binary mask. 
+ """ + + with BioReader(img_path) as reader: + with BioWriter(ground_truth_path, metadata=reader.metadata) as writer: + ground_truth = np.asarray(reader[:] != 0) + writer[:] = ground_truth + + return ground_truth \ No newline at end of file diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py new file mode 100644 index 0000000..1b51809 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py @@ -0,0 +1,96 @@ +"""Testing the Command Line Tool.""" + +import faulthandler +import json +from pathlib import Path +from typer.testing import CliRunner + +from .conftest import FixtureReturnType + +from {{cookiecutter.plugin_package}}.__main__ import app + +faulthandler.enable() + + +def test_cli(generate_test_data : FixtureReturnType) -> None: # noqa + """Test the command line.""" + inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data #noqa + + runner = CliRunner() + + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--outDir", + out_dir, + ], + ) + + assert result.exit_code == 0 + +def test_cli_short(generate_test_data : FixtureReturnType): # noqa + """Test the command line.""" + runner = CliRunner() + + inp_dir, out_dir, _, _, _ = generate_test_data #noqa + + result = runner.invoke( + app, + [ + "-i", + inp_dir, + "-o", + out_dir, + ], + ) + + assert result.exit_code == 0 + +def test_cli_preview(generate_test_data : FixtureReturnType): # noqa + """Test the preview option.""" + runner = CliRunner() + + inp_dir, out_dir, _, _, _ = generate_test_data #noqa + + + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--outDir", + out_dir, + "--preview", + ], + ) + + assert result.exit_code == 0 + + with Path.open(out_dir / "preview.json") as file: + plugin_json = json.load(file) + + # verify we generate the preview file + assert plugin_json == {} + + +def 
test_cli_bad_input(generate_test_data : FixtureReturnType): # noqa + """Test bad inputs.""" + runner = CliRunner() + + inp_dir, out_dir, _, _, _ = generate_test_data #noqa + # replace with a bad path + inp_dir = "/does_not_exists" + + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--outDir", + out_dir, + ], + ) + + assert result.exc_info[0] is SystemExit diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py new file mode 100644 index 0000000..75e3552 --- /dev/null +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py @@ -0,0 +1,22 @@ +"""Tests for {{cookiecutter.package_name}}.""" + +import pytest +from {{cookiecutter.plugin_package}}.{{cookiecutter.package_name}} import ( + {{cookiecutter.package_name}}, +) +from .conftest import FixtureReturnType + + +def test_{{cookiecutter.package_name}}(generate_test_data : FixtureReturnType): + """Test {{cookiecutter.package_name}}.""" + inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data + filepattern = ".*" + assert {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) == None + + +@pytest.mark.skipif("not config.getoption('slow')") +def test_{{cookiecutter.package_name}}(generate_large_test_data : FixtureReturnType): + """Test {{cookiecutter.package_name}}.""" + inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_large_test_data + filepattern = ".*" + assert {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) == None \ No newline at end of file diff --git a/utils/polus-stitching-vector-merger-plugin/Dockerfile b/utils/polus-stitching-vector-merger-plugin/Dockerfile new file mode 100644 index 0000000..2733ed1 --- /dev/null +++ b/utils/polus-stitching-vector-merger-plugin/Dockerfile @@ -0,0 +1,4 @@ +FROM 
python:3.6-alpine +COPY VERSION / +COPY main.py main.py +ENTRYPOINT ["python3", "main.py"] \ No newline at end of file diff --git a/utils/polus-stitching-vector-merger-plugin/README.md b/utils/polus-stitching-vector-merger-plugin/README.md new file mode 100644 index 0000000..0c7e53a --- /dev/null +++ b/utils/polus-stitching-vector-merger-plugin/README.md @@ -0,0 +1,29 @@ +# Polus Stitching Vector Collection Merger Plugin + +This WIPP plugin merges stitching vector collections together. It takes as input a minimum of 2 collections up to a maximum of 5 collections. + + Contact [Gauhar Bains](mailto:gauhar.bains@labshare.org) for more information. + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the merger plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes one input argument and one output argument: + +| Name | Description | I/O | Type | +| --------------------- | ------------------------------ | ------ | --------------- | +| `--VectorCollection1` | 1st stitchingVector Collection | Input | stitchingVector | +| `--VectorCollection2` | 2nd stitchingVector Collection | Input | stitchingVector | +| `--VectorCollection3` | 3rd stitchingVector Collection | Input | stitchingVector | +| `--VectorCollection4` | 4th stitchingVector Collection | Input | stitchingVector | +| `--VectorCollection5` | 5th stitchingVector Collection | Input | stitchingVector | +| `--outDir` | Output collection | Output | stitchingVector | diff --git a/utils/polus-stitching-vector-merger-plugin/VERSION b/utils/polus-stitching-vector-merger-plugin/VERSION new file mode 100644 index 0000000..84aa3a7 --- /dev/null +++ b/utils/polus-stitching-vector-merger-plugin/VERSION @@ -0,0 +1 @@ +0.1.8 \ No newline at end of file diff --git a/utils/polus-stitching-vector-merger-plugin/build-docker.sh b/utils/polus-stitching-vector-merger-plugin/build-docker.sh new file mode 100755 index 0000000..5515f86 --- /dev/null +++ b/utils/polus-stitching-vector-merger-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:VERSION] + +[bumpversion:file:README.md] + +[bumpversion:file:plugin.json] + +[bumpversion:file:src/polus/images/utils/rxiv_download/__init__.py] diff --git a/utils/rxiv-download-tool/.dockerignore b/utils/rxiv-download-tool/.dockerignore new file mode 100644 index 0000000..7c603f8 --- /dev/null +++ b/utils/rxiv-download-tool/.dockerignore @@ -0,0 +1,4 @@ +.venv +out +tests +__pycache__ diff --git a/utils/rxiv-download-tool/.gitignore b/utils/rxiv-download-tool/.gitignore new file mode 100644 index 0000000..c9c7ae7 --- /dev/null +++ b/utils/rxiv-download-tool/.gitignore @@ -0,0 +1,2 @@ +poetry.lock +out diff --git a/utils/rxiv-download-tool/Dockerfile b/utils/rxiv-download-tool/Dockerfile new file mode 100644 index 0000000..13e8242 --- /dev/null +++ b/utils/rxiv-download-tool/Dockerfile @@ -0,0 +1,20 @@ +FROM polusai/bfio:2.3.6 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" +ENV POLUS_EXT=".xml" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +COPY src ${EXEC_DIR}/src + +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.utils.rxiv_download"] +CMD ["--help"] diff --git a/utils/rxiv-download-tool/README.md b/utils/rxiv-download-tool/README.md new file mode 100644 index 0000000..5c90866 --- /dev/null +++ b/utils/rxiv-download-tool/README.md @@ -0,0 +1,30 @@ +# Rxiv Download (v0.1.0-dev0) + +This plugin allows to download data from open access archives. 
Currently this plugin supports downloading data from [arxiv](https://www.openarchives.org/). Later additional support for other archives will be added. + +## Building + +To build the Docker image for the download plugin, run +`bash build-docker.sh`. + +## Run the Docker image + +To execute the built docker image for the download plugin, run +`bash run-plugin.sh`. + +## Options + +This plugin takes 2 input arguments and +1 output argument: + +| Name | Description | I/O | Type | +| --------------- | ------------------------------------------------------------ | ------ | ----------- | +| `--rxiv ` | Download data from open access archives | Input | String | +| `--start ` | Start date | Input | String | +| `--outDir` | Directory to store the downloaded data | Output | genericData | +| `--preview` | Generate a JSON file with outputs | Output | JSON | + + + +## Sample docker command: +```docker run -v /home/ec2-user/data/:/home/ec2-user/data/ polusai/rxiv-download-tool:0.1.0-dev0 --rxiv="arXiv" --start='2023-2-16' --outDir=/home/ec2-user/data/output``` diff --git a/utils/rxiv-download-tool/VERSION b/utils/rxiv-download-tool/VERSION new file mode 100644 index 0000000..206c085 --- /dev/null +++ b/utils/rxiv-download-tool/VERSION @@ -0,0 +1 @@ +0.1.0-dev0 diff --git a/utils/rxiv-download-tool/build-docker.sh b/utils/rxiv-download-tool/build-docker.sh new file mode 100644 index 0000000..a5b03c6 --- /dev/null +++ b/utils/rxiv-download-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$( + +2023-12-18T17:52:12Z +http://export.arxiv.org/oai2 + + +

+ oai:arXiv.org:1007.1025 + 2023-12-18 + cs + physics:nlin +
+ + + Inflection system of a language as a complex network + Fukś, Henryk + Computer Science - Computation and Language + Nonlinear Sciences - Adaptation and Self-Organizing Systems + We investigate inflection structure of a synthetic language using Latin as an +example. We construct a bipartite graph in which one group of vertices +correspond to dictionary headwords and the other group to inflected forms +encountered in a given text. Each inflected form is connected to its +corresponding headword, which in some cases in non-unique. The resulting sparse +graph decomposes into a large number of connected components, to be called word +groups. We then show how the concept of the word group can be used to construct +coverage curves of selected Latin texts. We also investigate a version of the +inflection graph in which all theoretically possible inflected forms are +included. Distribution of sizes of connected components of this graphs +resembles cluster distribution in a lattice percolation near the critical +point. + + Comment: 6 pages, 9 figures + 2010-07-06 + text + http://arxiv.org/abs/1007.1025 + Proceedings of 2009 IEEE Toronto International Conference - + Science and Technology for Humanity, IEEE, Toronto 2009, pp. 491-496 + doi:10.1109/TIC-STH.2009.5444449 + + + + +
+ oai:arXiv.org:1007.1026 + 2023-12-18 + physics:nlin +
+ + + On the calibration of neural networks for histological slide-level + classification + Kurz, Alexander + Mehrtens, Hendrik A. + Bucher, Tabea-Clara + Brinker, Titus J. + Electrical Engineering and Systems Science - Image and Video Processing + Computer Science - Computer Vision and Pattern Recognition + Deep Neural Networks have shown promising classification performance when +predicting certain biomarkers from Whole Slide Images in digital pathology. +However, the calibration of the networks' output probabilities is often not +evaluated. Communicating uncertainty by providing reliable confidence scores is +of high relevance in the medical context. In this work, we compare three neural +network architectures that combine feature representations on patch-level to a +slide-level prediction with respect to their classification performance and +evaluate their calibration. As slide-level classification task, we choose the +prediction of Microsatellite Instability from Colorectal Cancer tissue +sections. We observe that Transformers lead to good results in terms of +classification performance and calibration. When evaluating the classification +performance on a separate dataset, we observe that Transformers generalize +best. The investigation of reliability diagrams provides additional insights to +the Expected Calibration Error metric and we observe that especially +Transformers push the output probabilities to extreme values, which results in +overconfident predictions. + + Comment: 7 pages, 2 figures, 2 tables + 2023-12-15 + text + http://arxiv.org/abs/2312.09719 + + +
+ +
+ oai:arXiv.org:2312.09720 + 2023-12-18 + eess +
+ + + RIS-Enabled NLoS Near-Field Joint Position and Velocity Estimation under + User Mobility + Rahal, Moustafa + Denis, Benoit + Keskin, Musa Furkan + Uguen, Bernard + Wymeersch, Henk + Electrical Engineering and Systems Science - Signal Processing + In the context of single-base station (BS) non-line-of-sight (NLoS) +single-epoch localization with the aid of a reflective reconfigurable +intelligent surface (RIS), this paper introduces a novel three-step algorithm +that jointly estimates the position and velocity of a mobile user equipment +(UE), while compensating for the Doppler effects observed in near-field (NF) at +the RIS elements over the short transmission duration of a sequence of downlink +(DL) pilot symbols. First, a low-complexity initialization procedure is +proposed, relying in part on far-field (FF) approximation and a static user +assumption. Then, an alternating optimization procedure is designed to +iteratively refine the velocity and position estimates, as well as the channel +gain. The refinement routines leverage small angle approximations and the +linearization of the RIS response, accounting for both NF and mobility effects. +We evaluate the performance of the proposed algorithm through extensive +simulations under diverse operating conditions with regard to signal-to-noise +ratio (SNR), UE mobility, uncontrolled multipath and RIS-UE distance. Our +results reveal remarkable performance improvements over the state-of-the-art +(SoTA) mobility-agnostic benchmark algorithm, while indicating convergence of +the proposed algorithm to respective theoretical bounds on position and +velocity estimation. + + Comment: 11 pages, 9 figures, journal + 2023-12-15 + text + http://arxiv.org/abs/2312.09720 + + +
+6905935|1001 + + diff --git a/utils/rxiv-download-tool/ict.yaml b/utils/rxiv-download-tool/ict.yaml new file mode 100644 index 0000000..2f8bb5b --- /dev/null +++ b/utils/rxiv-download-tool/ict.yaml @@ -0,0 +1,51 @@ +author: +- Nick Schaub +- Hamdah Shafqat +contact: nick.schaub@nih.gov +container: polusai/rxiv-download-tool:0.1.0-dev0 +description: This plugin allows to download data from Rxiv website. +entrypoint: python3 -m polus.images.utils.rxiv_download +inputs: +- description: Pull records from open access archives. + format: + - rxiv + name: rxiv + required: true + type: string +- description: Start date. + format: + - start + name: start + required: false + type: string +- description: Generate an output preview. + format: + - preview + name: preview + required: false + type: boolean +name: polusai/DownloadRxivtextdata +outputs: +- description: Output collection. + format: + - outDir + name: outDir + required: true + type: path +repository: https://github.com/PolusAI/image-tools +specVersion: 1.0.0 +title: Download Rxiv text data +ui: +- description: Pull records from open access archives. + key: inputs.rxiv + title: rxiv + type: text +- description: Start date. + key: inputs.start + title: start + type: text +- description: Generate an output preview. 
+ key: inputs.preview + title: Preview example output of this plugin + type: checkbox +version: 0.1.0-dev0 diff --git a/utils/rxiv-download-tool/plugin.json b/utils/rxiv-download-tool/plugin.json new file mode 100644 index 0000000..1954acc --- /dev/null +++ b/utils/rxiv-download-tool/plugin.json @@ -0,0 +1,70 @@ +{ + "name": "Rxiv-Download", + "version": "0.1.0-dev0", + "title": "Download Rxiv text data", + "description": "This plugin allows to download data from Rxiv website.", + "author": "Nick Schaub (nick.schaub@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", + "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/PolusAI/image-tools", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "", + "containerId": "polusai/rxiv-download-tool:0.1.0-dev0", + "baseCommand": [ + "python3", + "-m", + "polus.images.utils.rxiv_download" + ], + "inputs": [ + { + "name": "rxiv", + "type": "string", + "title": "rxiv", + "description": "Pull records from open access archives.", + "required": "True" + }, + { + "name": "start", + "type": "string", + "title": "start", + "description": "Start date.", + "required": "False" + }, + { + "name": "preview", + "type": "boolean", + "title": "Preview", + "description": "Generate an output preview.", + "required": "False" + } + ], + "outputs": [ + { + "name": "outDir", + "type": "genericData", + "description": "Output collection." 
+ } + ], + "ui": [ + { + "key": "inputs.rxiv", + "type": "string", + "title": "rxiv", + "description": "Pull records from open access archives.", + "required": "True" + }, + { + "key": "inputs.start", + "type": "string", + "title": "start", + "description": "Start date.", + "required": "False" + }, + { + "key": "inputs.preview", + "type": "boolean", + "title": "Preview example output of this plugin", + "description": "Generate an output preview.", + "required": "False" + } + ] +} diff --git a/utils/rxiv-download-tool/pyproject.toml b/utils/rxiv-download-tool/pyproject.toml new file mode 100644 index 0000000..5920838 --- /dev/null +++ b/utils/rxiv-download-tool/pyproject.toml @@ -0,0 +1,41 @@ +[tool.poetry] +name = "polus-images-utils-rxiv-download" +version = "0.1.0-dev0" +description = "Fetch text data from rxiv" +authors = [ + "Nick Schaub ", + "Hamdah Shafqat abbasi " + ] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +typer = "^0.7.0" +requests = "^2.31.0" +rxiv-types = "^0.1.0" +tqdm = "^4.66.1" +xmltodict = "^0.13.0" +pydantic = "1.10.4" + + +[[tool.poetry.source]] +name = "test" +url = "https://test.pypi.org/simple/" +default = false +secondary = true + +[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +flake8 = "^6.0.0" +pre-commit = "^3.2.1" +flake8-docstrings = "^1.7.0" +black = "^23.3.0" +mypy = "^1.1.1" +pytest = "^7.2.2" +ruff = "^0.0.270" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/utils/rxiv-download-tool/run-plugin.sh b/utils/rxiv-download-tool/run-plugin.sh new file mode 100644 index 0000000..48c596a --- /dev/null +++ b/utils/rxiv-download-tool/run-plugin.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +version=$( None: + """Scaled Nyxus plugin allows to extract features from labelled images.""" + logger.info(f"--rxiv = {rxiv}") + logger.info(f"--start = {start}") + logger.info(f"--outDir = {out_dir}") + + if 
start is not None: + start_date = datetime.strptime(start, "%Y-%m-%d").date() + + out_dir = out_dir.resolve() + + if not out_dir.exists(): + out_dir.mkdir(exist_ok=True) + + assert out_dir.exists(), f"{out_dir} does not exist!! Please check input path again" + + model = ArxivDownload(path=out_dir, rxiv=rxiv, start=start_date) + model.fetch_and_save_records() + + if preview: + generate_preview(out_dir) + logger.info(f"generating preview data in {out_dir}") + + +if __name__ == "__main__": + app() diff --git a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py b/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py new file mode 100644 index 0000000..b198627 --- /dev/null +++ b/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py @@ -0,0 +1,217 @@ +"""Rxiv Download Plugin.""" +import json +import logging +import os +import shutil +from concurrent.futures import ProcessPoolExecutor +from datetime import datetime +from io import BytesIO +from pathlib import Path +from typing import Optional + +import requests +import xmltodict +from rxiv_types import arxiv_records +from rxiv_types.models.oai_pmh.org.openarchives.oai.pkg_2.resumption_token_type import ( + ResumptionTokenType, +) +from tqdm import tqdm +from xsdata.models.datatype import XmlDate + +logger = logging.getLogger(__name__) +logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO)) + +POLUS_EXT = os.environ.get("POLUS_EXT", ".xml") + +RXIVS = { + "arXiv": {"url": "https://export.arxiv.org/oai2", "stride": 1000}, +} + + +def generate_preview( + path: Path, +) -> None: + """Generate preview of the plugin outputs.""" + prev_file = list( + Path().cwd().parents[4].joinpath("examples").rglob(f"*{POLUS_EXT}"), + )[0] + + shutil.copy(prev_file, path) + + +class ArxivDownload: + """Fetch OAI records from an API. + + Args: + rxiv: The rxiv to pull from. Must be one of ["arXiv"].str + token: A resumption token. Defaults to None. + start: Start date. 
Only used if `token=None`. + + Returns: + Raw XML bytes. + """ + + def __init__( + self, + path: Path, + rxiv: str, + start: Optional[datetime] = None, + ) -> None: + """Create a ArxivDownload.""" + self.path = path + self.rxiv = rxiv + self.start = start + + if self.rxiv not in RXIVS: + msg = f"{self.rxiv} is an invalid rxiv value. Must be one of {list(RXIVS)}" + raise ValueError( + msg, + ) + + if self.start is None and len(list(self.path.rglob(f"*{POLUS_EXT}"))) == 0: + self.start = datetime(1900, 1, 1) + + elif self.start is None and len(list(self.path.rglob(f"*{POLUS_EXT}"))) != 0: + self.start = self._resume_from() + + self.start = self.start + + self.params = {"verb": "ListRecords"} + + @staticmethod + def path_from_token( + path: Path, + rxiv: str, + start: Optional[datetime] = None, + token: Optional[ResumptionTokenType] = None, + ) -> Path: + """Creating output directory for saving records.""" + if start and token is not None: + file_path = path.joinpath( + f"{rxiv}_" + + f"{start.year}{str(start.month).zfill(2)}{str(start.day).zfill(0)}_" + + f"{int(token.cursor)}{POLUS_EXT}", + ) + + file_path.parent.mkdir(exist_ok=True, parents=True) + + return file_path + + def fetch_records(self) -> bytes: + """Fetch OAI records from an API.""" + # Configure parameters + if self.start is not None: + self.params.update( + { + "from": f"{self.start.year}-" + + f"{str(self.start.month).zfill(2)}-" + + f"{str(self.start.day).zfill(2)}", + "metadataPrefix": "oai_dc", + }, + ) + response = requests.get( + RXIVS["arXiv"]["url"], # type: ignore + params=self.params, + timeout=20, + ) + if response.ok: + logger.info( + f"Successfully hit url: {response.url}", + ) + else: + logger.info( + f"Error pulling data: {response.url} status {response.status_code}", + ) + + return response.content + + @staticmethod + def _get_latest(file: Path) -> datetime: + """Find the latest date to resume download files.""" + fixed_date = datetime(1900, 1, 1) + records = 
arxiv_records(str(file.absolute())) + if records.list_records is None: + msg = "Record list is empty!! Please download it again" + raise ValueError(msg) + for record in records.list_records.record: + if record.header is None: + msg = "Record header is empty!! Please download it again" + raise ValueError(msg) + if not isinstance(record.header.datestamp, XmlDate): + msg = "Record date is missing!!" + raise ValueError(msg) + record_date = record.header.datestamp.to_datetime() + if record_date > fixed_date: + last = record_date + return last + + def _resume_from(self) -> datetime: + """Find the previous cursor and create a resume token.""" + if not self.path.exists(): + return datetime(1900, 1, 1) + files = [ + f + for f in self.path.iterdir() + if f.is_file() and f.name.startswith(self.rxiv) + ] + + with ProcessPoolExecutor() as executor: + dates = list(executor.map(self._get_latest, files)) + return max(dates) + + @staticmethod + def save_records(path: Path, record: bytes) -> None: + """Writing response content either in XML or JSON format.""" + if POLUS_EXT == ".xml": + with Path.open(path, "wb") as fw: + fw.write(record) + fw.close() + elif POLUS_EXT == ".json": + parsed_data = xmltodict.parse(record, attr_prefix="") + json_data = json.dumps(parsed_data, indent=2) + with Path.open(path, "w") as fw: + fw.write(json_data) + fw.close() + + def fetch_and_save_records(self) -> None: + """Fetch and save response contents.""" + response = self.fetch_records() + + records = arxiv_records(BytesIO(response)) + + if records.list_records is None: + msg = "Unable to download a record" + raise ValueError(msg) + + for record in records.list_records.record: + if record.header is not None and not isinstance( + record.header.datestamp, + XmlDate, + ): + msg = "Error with downloading a XML record" + raise ValueError(msg) + + logger.info("Getting token...") + token = records.list_records.resumption_token + key, _ = token.value.split("|") + index = token.cursor + + if 
token.complete_list_size is None: + msg = "Error with downloading a XML record" + raise ValueError(msg) + + logger.info(f"Resuming from date: {self.start}") + + for i in tqdm( + range(int(index), token.complete_list_size, 1000), + total=((token.complete_list_size - int(index)) // 1000 + 1), + ): + thread_token = ResumptionTokenType(value="|".join([key, str(i)]), cursor=i) + + file_path = self.path_from_token( + path=self.path, + rxiv=self.rxiv, + start=self.start, + token=thread_token, + ) + self.save_records(path=file_path, record=response) diff --git a/utils/rxiv-download-tool/tests/__init__.py b/utils/rxiv-download-tool/tests/__init__.py new file mode 100644 index 0000000..17974cd --- /dev/null +++ b/utils/rxiv-download-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Rxiv Download Plugin.""" diff --git a/utils/rxiv-download-tool/tests/conftest.py b/utils/rxiv-download-tool/tests/conftest.py new file mode 100644 index 0000000..b1448d5 --- /dev/null +++ b/utils/rxiv-download-tool/tests/conftest.py @@ -0,0 +1,41 @@ +"""Test fixtures. + +Set up all data used in tests. 
+""" + +import shutil +import tempfile +from pathlib import Path +from typing import Union + +import pytest + + +def pytest_addoption(parser: pytest.Parser) -> None: + """Add options to pytest.""" + parser.addoption( + "--slow", + action="store_true", + dest="slow", + default=False, + help="run slow tests", + ) + + +def clean_directories() -> None: + """Remove all temporary directories.""" + for d in Path(".").cwd().iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +@pytest.fixture() +def output_directory() -> Union[str, Path]: + """Create output directory.""" + return Path(tempfile.mkdtemp(dir=Path.cwd())) + + +@pytest.fixture(params=["2023-12-16", "2023-12-17"]) +def get_params(request: pytest.FixtureRequest) -> pytest.FixtureRequest: + """To get the parameter of the fixture.""" + return request.param diff --git a/utils/rxiv-download-tool/tests/test_cli.py b/utils/rxiv-download-tool/tests/test_cli.py new file mode 100644 index 0000000..f967909 --- /dev/null +++ b/utils/rxiv-download-tool/tests/test_cli.py @@ -0,0 +1,51 @@ +"""Test Command line Tool.""" + +from typer.testing import CliRunner +from pathlib import Path +import pytest +from polus.images.utils.rxiv_download.__main__ import app +from .conftest import clean_directories +import time + + +def test_cli(output_directory: Path, get_params: pytest.FixtureRequest) -> None: + """Test the command line.""" + runner = CliRunner() + start = get_params + result = runner.invoke( + app, + [ + "--rxiv", + "arXiv", + "--start", + start, + "--outDir", + output_directory, + ], + ) + + assert result.exit_code == 0 + time.sleep(5) + clean_directories() + + +@pytest.mark.skipif("not config.getoption('slow')") +def test_short_cli(output_directory: Path, get_params: pytest.FixtureRequest) -> None: + """Test short cli command line.""" + runner = CliRunner() + start = get_params + result = runner.invoke( + app, + [ + "-r", + "arXiv", + "-s", + start, + "-o", + output_directory, + ], + ) + + assert 
result.exit_code == 0 + time.sleep(5) + clean_directories() diff --git a/utils/rxiv-download-tool/tests/test_fetch.py b/utils/rxiv-download-tool/tests/test_fetch.py new file mode 100644 index 0000000..d2130ca --- /dev/null +++ b/utils/rxiv-download-tool/tests/test_fetch.py @@ -0,0 +1,43 @@ +"""Test Command line Tool.""" + +from pathlib import Path +import polus.images.utils.rxiv_download.fetch as ft +from .conftest import clean_directories +import time +import pytest +from datetime import datetime + + +def test_fetch_and_save_records( + output_directory: Path, get_params: pytest.FixtureRequest +) -> None: + """Test record fetching and saving.""" + + start = datetime.strptime(get_params, "%Y-%m-%d").date() + + model = ft.ArxivDownload(path=output_directory, rxiv="arXiv", start=start) + model.fetch_and_save_records() + + out_ext = all([Path(f.name).suffix for f in output_directory.iterdir()]) + + assert out_ext == True + + out_date = [Path(f.name).stem.split("_")[1] for f in output_directory.iterdir()][0] + assert out_date == "".join(get_params.split("-")) + clean_directories() + time.sleep(5) + + +def test_fetch_records( + output_directory: Path, get_params: pytest.FixtureRequest +) -> None: + """Test fetch records.""" + + start = datetime.strptime(get_params, "%Y-%m-%d").date() + + model = ft.ArxivDownload(path=output_directory, rxiv="arXiv", start=start) + response = model.fetch_records() + + assert response != 0 + clean_directories() + time.sleep(5) diff --git a/visualization/polus-graph-pyramid-builder-plugin/Dockerfile b/visualization/polus-graph-pyramid-builder-plugin/Dockerfile new file mode 100644 index 0000000..d303a4f --- /dev/null +++ b/visualization/polus-graph-pyramid-builder-plugin/Dockerfile @@ -0,0 +1,24 @@ +# Get image containing bfio +FROM polusai/bfio:2.1.9 + +COPY VERSION / + +ARG EXEC_DIR="/opt/executables" +ARG DATA_DIR="/data" + +#Create folders +RUN mkdir -p ${EXEC_DIR} \ + && mkdir -p ${DATA_DIR}/inputs \ + && mkdir ${DATA_DIR}/outputs + 
+#Copy executable +COPY src ${EXEC_DIR}/ + +RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir + +RUN python3 ${EXEC_DIR}/dl_fi.py + +WORKDIR ${EXEC_DIR} + +# Default command. Additional arguments are provided through the command line +ENTRYPOINT ["python3", "/opt/executables/main.py"] diff --git a/visualization/polus-graph-pyramid-builder-plugin/README.md b/visualization/polus-graph-pyramid-builder-plugin/README.md new file mode 100644 index 0000000..4b9a1f1 --- /dev/null +++ b/visualization/polus-graph-pyramid-builder-plugin/README.md @@ -0,0 +1,40 @@ +# Polus CZI Extraction Plugin + +This WIPP plugin will import a csv collection and build a DeepZoom pyramid of graphs, where each graph contains a heatmap of each column plotted against another column. All n-columns are plotted against each other, excluding tranposed graphs and graphs where each axis has the same column. This leads to a total of (n^2-n)/2 graphs. + +Two types of graphs will be produced: +1) Linear sclaed graphs +2) Log scaled graphs + + The output will contain dzi and csv files for both linear and log scaled outputs. + There were will be two different directories that contain the pyramid images for the linear and log scaled outputs + +For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). + +## Building + +To build the Docker image for the conversion plugin, run +`./build-docker.sh`. + +## Install WIPP Plugin + +If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. 
+ +## Options + +This plugin takes one input argument and one output argument: + +| Name | Description | I/O | Type | +| -------- | ---------------------- | ------ | ---------------- | +| `inpDir` | Input CSV collection | Input | CSV Collection | +| `outDir` | Output pyramid | Output | Pyramid | + +## Run the plugin + +### Run the Docker Container + +```bash +docker run -v /path/to/data:/data graph-pyramid-builder \ + --inpDir /data/input \ + --outDir /data/output +``` diff --git a/visualization/polus-graph-pyramid-builder-plugin/VERSION b/visualization/polus-graph-pyramid-builder-plugin/VERSION new file mode 100644 index 0000000..e05cb33 --- /dev/null +++ b/visualization/polus-graph-pyramid-builder-plugin/VERSION @@ -0,0 +1 @@ +1.3.8 diff --git a/visualization/polus-graph-pyramid-builder-plugin/build-docker.sh b/visualization/polus-graph-pyramid-builder-plugin/build-docker.sh new file mode 100755 index 0000000..e96c755 --- /dev/null +++ b/visualization/polus-graph-pyramid-builder-plugin/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(' + +# Initialize the logger +logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', + datefmt='%d-%b-%y %H:%M:%S') +logger = logging.getLogger("main") +logger.setLevel(logging.INFO) + +def is_number(value): + """ This function checks to see if the value can be converted to a number """ + try: + float(value) + return True + except: + return False + +def load_csv(fpath): + """ Load a csv and select data + + Data is loaded from a csv, and data columns containing numeric values are + returned in a pandas Dataframe. The second row of the csv may contain + column classifiers, so the second row is first loaded and checked to + determine if the classifiers are present. 
+ Inputs: + fpath - Path to csv file + Outputs: + data - A pandas Dataframe + cnames - Names of columns + """ + + # Check if the first row is column coding, and if it is then find valid columns + data = pandas.read_csv(fpath,nrows=1) + is_coded = True + cnames = [] + for ind,fname in zip(range(len(data.columns)),data.columns): + if data[fname][0] != 'F' and data[fname][0] != 'C': + is_coded = False + if is_number(data[fname][0]): + cnames.append([fname,ind]) + else: + logging.info('Column {} does not appear to contain numeric values. Not building graphs for this column.'.format(fname)) + elif data[fname][0] == 'F': + cnames.append([fname,ind]) + else: + logging.info('Skipping column {} for reason: one hot encodings'.format(fname)) + + # Load the data + if is_coded: + data = pandas.read_csv(fpath,skiprows=[1],usecols=[c[0] for c in cnames]) + + else: + data = pandas.read_csv(fpath,usecols=[c[0] for c in cnames]) + + return data, cnames + +def bin_data(data, bin_stats): + """ This function bins the data + Inputs: + data - pandas dataframe of data + bin_stats - stats of the data + Outputs: + bins - binned data ranging from (0, bincount) + graph_index - Numeric value of column index from original csv + graph_dict - a dictionary containing the indexes of graphs + """ + + column_names = data.columns + nfeats = data.shape[1] + nrows = data.shape[0] + + # Handle NaN values + data_ind = pandas.notnull(data) + data[~data_ind] = 255 + + data = data.astype(np.uint16) # cast to save memory + data[data>=bincount] = bincount - 1 # in case of numerical precision issues + + + if nrows < 2**8: + dtype = np.uint8 + elif nrows < 2**16: + dtype = np.uint16 + elif nrows < 2**32: + dtype = np.uint32 + else: + dtype = np.uint64 + + totalgraphs = int((nfeats**2 - nfeats)/2) + bins = np.zeros((totalgraphs, bincount, bincount), dtype=dtype) + graph_index = [] + graph_dict = {} + + # Create a linear index for feature bins + i = 0 + for feat1 in range(nfeats): + name1 = column_names[feat1] + 
feat1_tf = data[name1] * bincount + + for feat2 in range(feat1 + 1, nfeats): + graph_dict[(feat1, feat2)] = i + name2 = column_names[feat2] + + feat2_tf = data[name2] + feat2_tf = feat2_tf[data_ind[name1] & data_ind[name2]] + + if feat2_tf.size<=1: + continue + + # sort linear matrix indices + SortedFeats = np.sort(feat1_tf[data_ind[name1] & data_ind[name2]] + feat2_tf) + + # Do math to get the indices + ind2 = np.nonzero(np.diff(SortedFeats))[0] # nonzeros are cumulative sum of all bin values + ind2 = np.append(ind2,SortedFeats.size-1) + rows = (SortedFeats[ind2]/bincount).astype(np.uint8) # calculate row from linear index + cols = np.mod(SortedFeats[ind2],bincount) # calculate column from linear index + counts = np.diff(ind2) # calculate the number of values in each bin + + bins[i,rows[0],cols[0]] = ind2[0] + 1 + bins[i,rows[1:],cols[1:]] = counts + graph_index.append([feat1,feat2]) + i = i + 1 + + return bins, graph_index, graph_dict + +def transform_data(data,column_names, typegraph): + """ Bin the data + + Data from a pandas Dataframe is binned in two dimensions. Binning is performed by + binning data in one column along one axis and another column is binned along the + other axis. All combinations of columns are binned without repeats or transposition. + There are only bincount number of bins in each dimension, and each bin is 1/bincount the size of the + difference between the maximum and minimum of each column. 
+ If the data needs to be logarithmically scaled, then the data is transformed by the algorithm presented + in this paper: https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001 + Inputs: + data - A pandas Dataframe, with nfeats number of columns + column_names - Names of Dataframe columns + typegraph - Defines whether logarithmic scale or linear scalef + Outputs: + bins - A numpy matrix that has shape (int((nfeats**2 - nfeats)/2),bincount,bincount) + bin_feats - A list containing the minimum and maximum values of each column + index - Numeric value of column index from original csv + diction - a dictionary containing the indexes of graphs + """ + + nfeats = len(column_names) + + # If logarithmic, need to transform the data + # https://iopscience.iop.org/article/10.1088/0957-0233/24/2/027001 + # Adjusts for behavior near zero + + if typegraph == "log": + C = 1/np.log(10)# Derivative of Natural Log e, d(ln(x))/dx = 1/x + data = data.astype(np.float64) + data = np.sign(data) * np.log10(1 + (abs(data/C))) + + bin_stats = {'min': data.min(), + 'max': data.max(), + 'binwidth': (data.max()-data.min()+10**-6)/bincount} + + + # Transform data into bin positions for fast binning + data = ((data - bin_stats['min'])/bin_stats['binwidth']).apply(np.floor) + + bins, index, diction = bin_data(data, bin_stats) + return bins, bin_stats, index, diction + +""" 2. Plot Generation """ +def format_ticks(out): + """ Generate tick labels + Polus Plots uses D3 to generate the plots. This function tries to mimic the + formatting of tick labels. In place of using scientific notation a scale + prefix is appended to the end of the number. See _prefix comments to see the + suffixes that are used. Numbers that are larger or smaller than 10**24 or + 10**-24 respectively are not handled and may throw an error. Values outside + of this range do not currently have an agreed upon prefix in the measurement + science community. 
+ + Inputs: + out - the values of the ticks used in graph + Outputs: + fticks - a list of strings containing formatted tick labels + """ + _prefix = { + -24: 'y', # yocto + -21: 'z', # zepto + -18: 'a', # atto + -15: 'f', # femto + -12: 'p', # pico + -9: 'n', # nano + -6: 'u', # micro + -3: 'm', # mili + 0: ' ', + 3: 'k', # kilo + 6: 'M', # mega + 9: 'G', # giga + 12: 'T', # tera + 15: 'P', # peta + 18: 'E', # exa + 21: 'Z', # zetta + 24: 'Y', # yotta + } + + fticks = [] + convertprefix = [] + for i in out: + formtick = "%#.3f" % i + decformtick = '%.2e' % Decimal(formtick) + convertexponent = float(decformtick[-3:]) + numbers = float(decformtick[:-4]) + if convertexponent > 0: + if convertexponent % 3 == 2: + movednum = round(numbers/10,2) + newprefix = _prefix[int(convertexponent + 1)] + formtick = str(movednum) + newprefix + elif convertexponent % 3 == 1: + movednum = round(numbers*10,1) + newprefix = _prefix[int(convertexponent - 1)] + formtick = str(movednum) + newprefix + else: + newprefix = _prefix[int(convertexponent)] + if i < 0: + formtick = str(decformtick[:5]) + newprefix + else: + formtick = str(decformtick[:4]) + newprefix + elif convertexponent < 0: + if convertexponent % -3 == -2: + movednum = round(numbers*10,1) + newprefix = _prefix[int(convertexponent - 1)] + formtick = str(movednum) + newprefix + elif convertexponent % -3 == -1: + movednum = round(numbers/10,2) + newprefix = _prefix[int(convertexponent + 1)] + formtick = str(movednum) + newprefix + else: + newprefix = _prefix[convertexponent] + if i < 0: + formtick = str(decformtick[:5]) + newprefix + else: + formtick = str(decformtick[:4]) + newprefix + else: + if i < 0: + formtick = str(decformtick[:5]) + _prefix[int(convertexponent)] + else: + formtick = str(decformtick[:4]) + _prefix[int(convertexponent)] + convertprefix.append(int(convertexponent)) + fticks.append(formtick) + + return fticks + +# Create a custom colormap to mimick Polus Plots +def get_cmap(): + + cmap_values = 
[[1.0,1.0,1.0,1.0]] + cmap_values.extend([[r/255,g/255,b/255,1] for r,g,b in zip(np.arange(0,255,2), + np.arange(153,255+1/128,102/126), + np.arange(34+1/128,0,-34/126))]) + cmap_values.extend([[r/255,g/255,b/255,1] for r,g,b in zip(np.arange(255,136-1/128,-119/127), + np.arange(255,0,-2), + np.arange(0,68+1/128,68/127))]) + cmap = ListedColormap(cmap_values) + + return cmap + +def gen_plot(col1, + col2, + indexdict, + column_names, + bin_stats, + fig, + ax, + data, + typegraph): + """ Generate a heatmap + Generate a heatmap of data for column 1 against column 2. + Inputs: + col1 - the column plotted on the y-axis + col2 - column plotted on the x-axis + indexdict - a dictionary containing the indexes of graphs + column_names - list of column names + bin_stats - a list containing the min,max values of each column + fig - pregenerated figure + ax - pregenerated axis + data - p regenerated heatmap bbox artist + typegraph - specifies whether the data is log scaled or linearly scaled + Outputs: + hmap - A numpy array containing pixels of the heatmap + """ + def keepdecreasing(labeltexts0, decreasefont, bbxtext): + """ This function decreases the size of the labels if its too big """ + labeltexts0.set_fontsize(decreasefont) + bbxtext = labeltexts0.get_window_extent(renderer = fig.canvas.renderer) + decreasefont = decreasefont - 1 + return bbxtext, decreasefont + + def calculateticks(ticks, bin_width, fmin, typegraph): + """ This functio n calculates the tick values for the graphs """ + + if typegraph == "linear": + tick_vals = [t for t in ticks*bin_width+fmin] + if typegraph == "log": + C = 1/np.log(10) + tick_vals = [np.sign(t)*C*(-1+(10**abs(t))) for t in ticks*bin_width+fmin] + return tick_vals + + if col2>col1: + d = np.squeeze(bins[indexdict[col1, col2],:,:]) + r = col1 + c = col2 + elif col2 CHUNK_SIZE) or (bbxtext.y0 < 0 or bbxtext.y1 > (CHUNK_SIZE*.075)): + bbxtext, decreasefont = keepdecreasing(axlabel.texts[0], decreasefont, bbxtext) + + # This is to decrease 
the size of the title labels if the name is too large (Y AXIS LABEL) + if len(aylabel.texts) == 0: + aylabel.text(0.5, 0.5, "\n".join(wrap(cname_r, 60)), va = 'center', ha = 'center', fontsize = sizefont, rotation = 90, wrap = True) + else: + aylabeltext0 = aylabel.texts[0] + aylabeltext0.set_text("\n".join(wrap(cname_r, 60))) + aylabeltext0.set_fontsize(sizefont) + + bbytext = (aylabel.texts[0]).get_window_extent(renderer = fig.canvas.renderer) + decreasefont = sizefont - 1 + while (bbytext.y0 < 0 or bbytext.y1 > CHUNK_SIZE) or (bbytext.x0 < 0 or bbytext.x1 > (CHUNK_SIZE*.075)): + bbytext, decreasefont = keepdecreasing(aylabel.texts[0], decreasefont, bbytext) + + while len(ax.lines) > 0: + ax.lines[-1].remove() + + # Calculating the value of each tick in the graph (fixed width) + fmin_c = bin_stats['min'][cname_c] + fmax_c = bin_stats['max'][cname_c] + binwidth_c = bin_stats['binwidth'][cname_c] + tick_vals_c= calculateticks(ax.get_xticks(), binwidth_c, fmin_c, typegraph) + if fmin_c < 0: # draw x=0 + ax.axvline(x=abs(fmin_c)/binwidth_c) + ax.set_xticklabels(format_ticks(tick_vals_c), rotation=45, fontsize = 5, ha='right') + + # Calculating the value of each tick in the graph (fixed width) + fmin_r = bin_stats['min'][cname_r] + fmax_r = bin_stats['max'][cname_r] + binwidth_r = bin_stats['binwidth'][cname_r] + tick_vals_r = calculateticks(ax.get_yticks(), binwidth_r, fmin_r, typegraph) + if fmin_r < 0: # draw y=0 + ax.axhline(y=abs(fmin_r)/binwidth_r) + ax.set_yticklabels(format_ticks(tick_vals_r), fontsize=5, ha='right') + + fig.canvas.draw() + hmap = np.array(fig.canvas.renderer.buffer_rgba()) + + return hmap + +def get_default_fig(cmap): + """ Generate a default figure, axis, and heatmap artist + Generate a figure and draw an empty graph with useful settings for repeated + drawing of new figures. By passing the existing figure, axis, and heatmap + artist to the plot generator, many things do not need to be drawn from + scratch. 
This decreases the plot drawing time by a factor of 2-3 times. + Inputs: + cmap - the heatmap colormap + Outputs: + fig - A reference to the figure object + ax - A reference to the axis object + data - A reference to the heatmap artist + """ + fig, ax = plt.subplots(dpi=int(CHUNK_SIZE/4),figsize=(4,4),tight_layout={'h_pad':1,'w_pad':1}) + datacolor = ax.pcolorfast(np.zeros((bincount, bincount),np.uint64),cmap=cmap) + ticks = [t for t in range(0, bincount+1, int(bincount/(10)))] + + ax.set_xlim(0,bincount) + ax.set_ylim(0,bincount) + ax.set_xticks(ticks) + ax.set_yticks(ticks) + ax.set_xlabel(" ") + ax.set_ylabel(" ") + + ax.set_xticklabels(ticks, rotation = 45) + ax.set_yticklabels(ticks) + + fig.canvas.draw() + + axlabel = fig.add_axes([.075, 0, 1, .075], frameon = False, alpha = .5, facecolor = 'b') + axlabel.set_xticks([]) + axlabel.set_yticks([]) + axlabel.set_clip_on(True) + aylabel = fig.add_axes([0, .075, .075, 1], frameon = False, alpha = .5, facecolor = 'b') + aylabel.set_xticks([]) + aylabel.set_yticks([]) + aylabel.set_clip_on(True) + + return fig, ax, datacolor + +""" 3. 
Pyramid generation functions """ + +def _avg2(image): + """ Average pixels with optical field of 2x2 and stride 2 """ + + # Convert 32-bit pixels to prevent overflow during averaging + image = image.astype(np.uint32) + imageshape0 = image.shape[0] + imageshape1 = image.shape[1] + # Get the height and width of each image to the nearest even number + y_max = imageshape0 - imageshape0 % 2 + x_max = imageshape1 - imageshape1 % 2 + + # Perform averaging + avg_img = np.zeros(np.ceil([image.shape[0]/2,image.shape[1]/2,image.shape[2]]).astype(np.uint32)) + for z in range(4): + avg_img[0:int(y_max/2),0:int(x_max/2),z]= (image[0:y_max-1:2,0:x_max-1:2,z] + \ + image[1:y_max:2,0:x_max-1:2,z] + \ + image[0:y_max-1:2,1:x_max:2,z] + \ + image[1:y_max:2,1:x_max:2,z]) / 4 + + # The next if statements handle edge cases if the height or width of the + # image has an odd number of pixels + if y_max != imageshape0: + for z in range(3): + avg_img[-1,:int(x_max/2),z] = (image[-1,0:x_max-1:2,z] + \ + image[-1,1:x_max:2,z]) / 2 + if x_max != imageshape1: + for z in range(4): + avg_img[:int(y_max/2),-1,z] = (image[0:y_max-1:2,-1,z] + \ + image[1:y_max:2,-1,z]) / 2 + if y_max != imageshape0 and x_max != imageshape1: + for z in range(4): + avg_img[-1,-1,z] = image[-1,-1,z] + return avg_img + +def metadata_to_graph_info(outPath,outFile, ngraphs): + + # Create an output path object for the info file + op = Path(outPath).joinpath("{}.dzi".format(outFile)) + + # create an output path for the images + of = Path(outPath).joinpath('{}_files'.format(outFile)) + of.mkdir(exist_ok=True) + + # Get metadata info from the bfio reader + rows = np.ceil(np.sqrt(ngraphs)) + cols = np.round(np.sqrt(ngraphs)) + sizes = [cols*CHUNK_SIZE,rows*CHUNK_SIZE] + + # Calculate the number of pyramid levels + num_scales = np.ceil(np.log2(rows*CHUNK_SIZE)).astype(np.uint8) + + # create a scales template, use the full resolution + scales = { + "size":sizes, + "key": num_scales + } + + # initialize the json dictionary + info 
= { + "scales": [scales], # Will build scales belows + "rows": rows, + "cols": cols + } + + # create the information for each scale + for i in range(1,num_scales+1): + previous_scale = info['scales'][-1] + current_scale = copy.deepcopy(previous_scale) + current_scale['key'] = str(num_scales - i) + current_scale['size'] = [int(np.ceil(previous_scale['size'][0]/2)),int(np.ceil(previous_scale['size'][1]/2))] + info['scales'].append(current_scale) + + # write the dzi file + with open(op,'w') as writer: + writer.write(DZI.format(int(info['cols']*CHUNK_SIZE),int(info['rows']*CHUNK_SIZE))) + + return info + + +def _get_higher_res(S,info,cnames, outpath,out_file,indexscale,indexdict,binstats, typegraph, X=None,Y=None): + """ + The following function builds the image pyramid at scale S by building up only + the necessary information at high resolution layers of the pyramid. So, if 0 is + the original resolution of the image, getting a tile at scale 2 will generate + only the necessary information at layers 0 and 1 to create the desired tile at + layer 2. This function is recursive and can be parallelized. + Inputs: + S - current scale + info - dictionary of scale information + outpath - directory for all outputs + out_file - directory for current dataset + indexscale - index of the graph + binstats - stats for the binned data + typegraph - specifies whether the data is linear or logarithmically scaled + Outputs: + DeepZoom format of images. 
+ """ + + # Get the scale info + num_scales = len(info['scales']) + scale_info = info['scales'][num_scales-S-1] + + if scale_info==None: + raise ValueError("No scale information for resolution {}.".format(S)) + if X == None: + X = [0,scale_info['size'][0]] + if Y == None: + Y = [0,scale_info['size'][1]] + + # Modify upper bound to stay within resolution dimensions + if X[1] > scale_info['size'][0]: + X[1] = scale_info['size'][0] + if Y[1] > scale_info['size'][1]: + Y[1] = scale_info['size'][1] + + # Initialize the output + image = np.zeros((int(Y[1]-Y[0]),int(X[1]-X[0]),4),dtype=np.uint8) + + # If requesting from the lowest scale, then just generate the graph + if S==num_scales-1: + index = int((int(Y[0]/CHUNK_SIZE) + int(X[0]/CHUNK_SIZE) * info['rows'])) + if index>=len(indexscale): + image = np.ones((CHUNK_SIZE,CHUNK_SIZE,4),dtype=np.uint8) * (bincount + 55) + else: + image = gen_plot(col1=indexscale[index][0], + col2=indexscale[index][1], + indexdict=indexdict, + column_names=cnames, + bin_stats=binstats, + fig=fig, + ax=ax, + data=datacolor, + typegraph=typegraph) + + else: + # Set the subgrid dimensions + subgrid_dimX = list(np.arange(2*X[0], 2*X[1], CHUNK_SIZE).astype('int')) + subgrid_dimX.append(2*X[1]) + subgrid_dimY = list(np.arange(2*Y[0], 2*Y[1], CHUNK_SIZE).astype('int')) + subgrid_dimY.append(2*Y[1]) + + + for y in range(0,len(subgrid_dimY)-1): + subgrid_Y_ind0 = np.ceil((subgrid_dimY[y] - subgrid_dimY[0])/2).astype('int') + subgrid_Y_ind1 = np.ceil((subgrid_dimY[y+1] - subgrid_dimY[0])/2).astype('int') + for x in range(0,len(subgrid_dimX)-1): + subgrid_X_ind0 = np.ceil((subgrid_dimX[x] - subgrid_dimX[0])/2).astype('int') + subgrid_X_ind1 = np.ceil((subgrid_dimX[x+1] - subgrid_dimX[0])/2).astype('int') + if S==(num_scales - 6): #to use multiple processors to compute faster. 
+ sub_image = _get_higher_res_par(S=S+1, + info=info, + cnames=cnames, + outpath=outpath, + out_file=out_file, + indexscale=indexscale, + indexdict=indexdict, + binstats=binstats, + typegraph=typegraph, + X=subgrid_dimX[x:x+2], + Y=subgrid_dimY[y:y+2]) + else: + sub_image = _get_higher_res(S=S+1, + info=info, + cnames=cnames, + outpath=outpath, + out_file=out_file, + indexscale=indexscale, + indexdict=indexdict, + binstats=binstats, + typegraph=typegraph, + X=subgrid_dimX[x:x+2], + Y=subgrid_dimY[y:y+2]) + + image[subgrid_Y_ind0:subgrid_Y_ind1, subgrid_X_ind0:subgrid_X_ind1,:] = _avg2(sub_image) + del sub_image + + # Write the chunk + outpath = Path(outpath).joinpath('{}_files'.format(out_file),str(S)) + outpath.mkdir(exist_ok=True) + imageio.imwrite(outpath.joinpath('{}_{}.png'.format(int(X[0]/CHUNK_SIZE),int(Y[0]/CHUNK_SIZE))),image,format='PNG-FI',compression=1) + logger.info('Finished building tile (scale,X,Y): ({},{},{})'.format(S,int(X[0]/CHUNK_SIZE),int(Y[0]/CHUNK_SIZE))) + return image + +# This function performs the same operation as _get_highe_res, except it uses multiprocessing to grab higher +# resolution layers at a specific layer. 
+def _get_higher_res_par(S,info, cnames, outpath,out_file,indexscale, indexdict, binstats, typegraph, X=None,Y=None): + # Get the scale info + num_scales = len(info['scales']) + scale_info = info['scales'][num_scales-S-1] + + if scale_info==None: + ValueError("No scale information for resolution {}.".format(S)) + + if X == None: + X = [0,scale_info['size'][0]] + if Y == None: + Y = [0,scale_info['size'][1]] + + # Modify upper bound to stay within resolution dimensions + if X[1] > scale_info['size'][0]: + X[1] = scale_info['size'][0] + if Y[1] > scale_info['size'][1]: + Y[1] = scale_info['size'][1] + + # Initialize the output + image = np.zeros((Y[1]-Y[0],X[1]-X[0],4),dtype=np.uint8) + # If requesting from the lowest scale, then just generate the graph + if S==int(info['scales'][0]['key']): + index = (int(Y[0]/CHUNK_SIZE) + int(X[0]/CHUNK_SIZE) * info['rows']) + if index>=len(indexscale): + image = np.ones((CHUNK_SIZE,CHUNK_SIZE,4),dtype=np.uint8) * (bincount + 55) + else: + image = gen_plot(col1=indexscale[index][0], + col2=indexscale[index][1], + indexdict=indexdict, + column_names=cnames, + bin_stats=binstats, + fig=fig, + ax=ax, + data=datacolor, + typegraph=typegraph) + + else: + # Set the subgrid dimensions + subgrid_dimX = list(np.arange(2*X[0], 2*X[1], CHUNK_SIZE).astype('int')) + subgrid_dimX.append(2*X[1]) + subgrid_dimY = list(np.arange(2*Y[0], 2*Y[1], CHUNK_SIZE).astype('int')) + subgrid_dimY.append(2*Y[1]) + + subgrid_images = [] + + with Pool(processes=np.min(4,initial=multiprocessing.cpu_count())) as pool: + for y in range(0,len(subgrid_dimY)-1): + subgrid_Y_ind0 = np.ceil((subgrid_dimY[y] - subgrid_dimY[0])/2).astype('int') + subgrid_Y_ind1 = np.ceil((subgrid_dimY[y+1] - subgrid_dimY[0])/2).astype('int') + for x in range(0,len(subgrid_dimX)-1): + subgrid_X_ind0 = np.ceil((subgrid_dimX[x] - subgrid_dimX[0])/2).astype('int') + subgrid_X_ind1 = np.ceil((subgrid_dimX[x+1] - subgrid_dimX[0])/2).astype('int') + 
subgrid_images.append(pool.apply_async(_get_higher_res,(S+1, + info, + cnames, + outpath, + out_file, + indexscale, + indexdict, + binstats, + typegraph, + subgrid_dimX[x:x+2], + subgrid_dimY[y:y+2]))) + image[subgrid_Y_ind0:subgrid_Y_ind1,subgrid_X_ind0:subgrid_X_ind1,:] = _avg2((subgrid_images[y*(len(subgrid_dimX)-1) + x]).get()) + + del subgrid_images + + # Write the chunk + outpath = Path(outpath).joinpath('{}_files'.format(out_file),str(S)) + outpath.mkdir(exist_ok=True) + imageio.imwrite(outpath.joinpath('{}_{}.png'.format(int(X[0]/CHUNK_SIZE),int(Y[0]/CHUNK_SIZE))),image,format='PNG-FI',compression=1) + logger.info('Finished building tile (scale,X,Y): ({},{},{})'.format(S,int(X[0]/CHUNK_SIZE),int(Y[0]/CHUNK_SIZE))) + return image + +def write_csv(cnames,index,f_info,out_path,out_file): + """ This function writes the csv file necessary for the Deep Zoom format """ + + header = 'dataset_id, x_axis_id, y_axis_id, x_axis_name, y_axis_name, title, length, width, global_row, global_col\n' + line = '{:d}, {:d}, {:d}, {:s}, {:s}, default title, {:d}, {:d}, {:d}, {:d}\n' + l_ind = 0 + with open(str(Path(out_path).joinpath(out_file+'.csv').absolute()),'w') as writer: + writer.write(header) + for ind in index: + ind1 = ind[1] + ind0 = ind[0] + writer.write(line.format(1, + cnames[ind1][1], + cnames[ind0][1], + cnames[ind1][0], + cnames[ind0][0], + CHUNK_SIZE, + CHUNK_SIZE, + int(np.mod(l_ind,f_info['rows'])), + int(l_ind/f_info['rows']))) + l_ind += 1 + +if __name__=="__main__": + + + """ Initialize argument parser """ + logger.info("Parsing arguments...") + parser = argparse.ArgumentParser(prog='main', description='Build an image pyramid from data in a csv file.') + + """ Define the arguments """ + parser.add_argument('--inpDir', dest='inpDir', type=str, + help='Path to input images.', required=True) + + parser.add_argument('--outDir', dest='outDir', type=str, + help='Path to output images.', required=True) + + parser.add_argument('--bincount', dest='bin_count', 
type=int, + help='Number of bins', required=True) + + parser.add_argument('--scale', dest='scale', type=str, + help='Linear, Log, or Both', required=False) + + """ Get the input arguments """ + args = parser.parse_args() + + input_path = args.inpDir + output_path = Path(args.outDir) + bincount = args.bin_count + scales = [args.scale.lower()] + all_scales = ['linear','log'] + if scales[0] not in all_scales: + scales = all_scales + + logger.info('inpDir = {}'.format(input_path)) + logger.info('outDir = {}'.format(output_path)) + + # Set up the logger for each scale + loggers = {} + for scale in scales: + loggers[scale] = logging.getLogger("main.{}".format(scale.upper())) + loggers[scale].setLevel(logging.INFO) + + # Get the path to each csv file in the collection + input_files = [str(f.absolute()) for f in Path(input_path).iterdir() if ''.join(f.suffixes)=='.csv'] + + # Generate the default figure components + logger.info('Generating colormap and default figure...') + cmap = get_cmap() + fig, ax, datacolor = get_default_fig(cmap) + logger.info('Done!') + + for f in input_files: + + logger.info('Loading csv: {}'.format(f)) + data, cnames = load_csv(f) + column_names = [c[0] for c in cnames] + + for scale in scales: + + # Set the file path folder + folder_name = Path(f).name.replace('.csv','_{}'.format(scale)) + + # Process for current scale + loggers[scale].info('Processing: {}'.format(folder_name)) + + # Bin the data + loggers[scale].info('Binning data for {} {} features...'.format(len(column_names),scale.upper())) + bins, bin_stats, data_index, data_dict = transform_data(data,column_names, scale) + + # Generate the dzi file + loggers[scale].info('Generating pyramid {} metadata...'.format(scale.upper())) + ngraphs = len(data_index) + info_data = metadata_to_graph_info(output_path,folder_name, ngraphs) + loggers[scale].info('Done!') + + loggers[scale].info('Writing {} layout file...!'.format(scale.upper())) + 
write_csv(cnames,data_index,info_data,output_path,folder_name) + loggers[scale].info('Done!') + + # Create the pyramid + loggers[scale].info('Building {} pyramids...'.format(scale.upper())) + image_data = _get_higher_res(0, info_data,column_names, output_path,folder_name,data_index, data_dict, bin_stats, scale) + loggers[scale].info('Done!') diff --git a/visualization/polus-graph-pyramid-builder-plugin/src/requirements.txt b/visualization/polus-graph-pyramid-builder-plugin/src/requirements.txt new file mode 100644 index 0000000..da4cf76 --- /dev/null +++ b/visualization/polus-graph-pyramid-builder-plugin/src/requirements.txt @@ -0,0 +1,4 @@ +pandas>=0.25.1 +matplotlib>=3.1.1 +numpy>=1.21.0 +imageio==2.5.0 diff --git a/visualization/tabular-to-microjson-tool/.bumpversion.cfg b/visualization/tabular-to-microjson-tool/.bumpversion.cfg new file mode 100644 index 0000000..94c23e6 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/.bumpversion.cfg @@ -0,0 +1,27 @@ +[bumpversion] +current_version = 0.1.2-dev0 +commit = True +tag = False +parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
+serialize = + {major}.{minor}.{patch}-{release}{dev} + {major}.{minor}.{patch} + +[bumpversion:part:release] +optional_value = _ +first_value = dev +values = + dev + _ + +[bumpversion:part:dev] + +[bumpversion:file:pyproject.toml] +search = version = "{current_version}" +replace = version = "{new_version}" + +[bumpversion:file:plugin.json] + +[bumpversion:file:VERSION] + +[bumpversion:file:src/polus/images/visualization/tabular_to_microjson/__init__.py] diff --git a/visualization/tabular-to-microjson-tool/Dockerfile b/visualization/tabular-to-microjson-tool/Dockerfile new file mode 100644 index 0000000..4b00db3 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/Dockerfile @@ -0,0 +1,20 @@ +#!/bin/bash +FROM polusai/bfio:2.1.9 + +# environment variables defined in polusai/bfio +ENV EXEC_DIR="/opt/executables" +ENV POLUS_LOG="INFO" +ENV POLUS_IMG_EXT=".ome.tif" +ENV POLUS_TAB_EXT=".csv" + +# Work directory defined in the base container +WORKDIR ${EXEC_DIR} + +COPY pyproject.toml ${EXEC_DIR} +COPY VERSION ${EXEC_DIR} +COPY README.md ${EXEC_DIR} +COPY src ${EXEC_DIR}/src +RUN pip3 install ${EXEC_DIR} --no-cache-dir + +ENTRYPOINT ["python3", "-m", "polus.images.visualization.tabular_to_microjson"] +CMD ["--help"] diff --git a/visualization/tabular-to-microjson-tool/README.md b/visualization/tabular-to-microjson-tool/README.md new file mode 100644 index 0000000..3d1b5a7 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/README.md @@ -0,0 +1,62 @@ +# Tabular To Microjson(v0.1.1) + +This plugin uses [MICROJSON](https://github.com/bengtl/microjson/tree/dev) python library to generate JSON from tabular data which can be used in +[RENDER UI](https://render.ci.ncats.io/?imageUrl=https://files.scb-ncats.io/pyramids/Idr0033/precompute/41744/x(00-15)_y(01-24)_p0(1-9)_c(1-5)/) +application for visualization of microscopy images. 
+
+This plugin calculates geometry coordinates, i.e., `Polygon` and `Point`, using image positions from the corresponding stitching vector.
+Note: The filenames of the tabular data and the stitching vector should be the same.
+`groupBy` is used when there is more than one image in each well; in that case, pass a `variable` used in `stitchPattern` to group filenames in a stitching vector to compute geometry coordinates.
+
+Note: Currently this plugin supports two geometry types, `Polygon` and `Point`. Future work will add support for more geometry types to this plugin.
+
+Currently this plugin handles only three file formats supported by vaex:
+1. csv
+2. arrow
+3. feather
+
+
+Contact [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information.
+For more information on WIPP, visit the
+[official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp).
+
+## Building
+
+To build the Docker image for the conversion plugin, run
+`./build-docker.sh`.
+
+## Install WIPP Plugin
+
+If WIPP is running, navigate to the plugins page and add a new plugin. Paste the
+contents of `plugin.json` into the pop-up window and submit.
+ +## Options + +This plugin can take seven input arguments and one output argument: + +| Name | Description | I/O | Type | +|-------------------|-------------------------------------------------------|--------|--------------| +| `inpDir` | Input directory | Input | string | +| `stitchDir` | Directory containing stitching vectors | Input | string | +| `filePattern` | Pattern to parse tabular filenames | Input | string | +| `stitchPattern` | Pattern to parse filenames in stitching vector | Input | string | +| `groupBy` | Variable to group filenames in stitching vector | Input | string | +| `geometryType` | Geometry type (Polygon, Point) | Input | string | +| `outDir` | Output directory for overlays | Output | string | +| `preview` | Generate a JSON file with outputs | Output | JSON | + +## Run the plugin + +### Run the Docker Container + +```bash +docker run -v /data:/data polusai/tabular-to-microjson-plugin:0.1.1 \ + --inpDir /data/input \ + --stitchDir /data/stitchvector \ + --filePattern ".*.csv" \ + --stitchPattern "x{x:dd}_y{y:dd}_c{c:d}.ome.tif" \ + --groupBy None \ + --geometryType "Polygon" \ + --outDir /data/output \ + --preview +``` diff --git a/visualization/tabular-to-microjson-tool/VERSION b/visualization/tabular-to-microjson-tool/VERSION new file mode 100644 index 0000000..9d8d2c1 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/VERSION @@ -0,0 +1 @@ +0.1.2-dev0 diff --git a/visualization/tabular-to-microjson-tool/build-docker.sh b/visualization/tabular-to-microjson-tool/build-docker.sh new file mode 100644 index 0000000..c63f2c2 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$("] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<4.0" +typer = "^0.7.0" +filepattern = "^2.0.1" +tqdm = "^4.65.0" +pandas = "^2.0.3" +microjson = "^0.1.9" +vaex = "^4.17.0" +pydantic = "^2.4.2" + + 
+[tool.poetry.group.dev.dependencies] +bump2version = "^1.0.1" +pre-commit = "^3.1.0" +black = "^23.1.0" +flake8 = "^6.0.0" +mypy = "^1.0.1" +pytest = "^7.2.1" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/visualization/tabular-to-microjson-tool/run-plugin.sh b/visualization/tabular-to-microjson-tool/run-plugin.sh new file mode 100644 index 0000000..d3f055c --- /dev/null +++ b/visualization/tabular-to-microjson-tool/run-plugin.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +version=$( None: + """Apply Render Overlay to input tabular data to create microjson overlays.""" + logger.info(f"inpDir = {inp_dir}") + logger.info(f"filePattern = {file_pattern}") + logger.info(f"stitchDir = {stitch_dir}") + logger.info(f"geometryType = {geometry_type}") + logger.info(f"stitchPattern = {stitch_pattern}") + logger.info(f"groupBy = {group_by}") + logger.info(f"outDir = {out_dir}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + + fps = fp.FilePattern(inp_dir, file_pattern) + + files = [file[1][0] for file in fps()] + + with ThreadPoolExecutor(max_workers=num_workers) as executor: + for file in tqdm(files, desc="Creating overlays", total=len(files)): + fname = pathlib.Path(file).stem + stitch_path = stitch_dir.joinpath(f"{fname}.txt") + if geometry_type == "Polygon": + poly = mo.PolygonSpec( + stitch_path=str(stitch_path), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + else: + poly = mo.PointSpec( + stitch_path=str(stitch_path), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + + micro_model = mo.RenderOverlayModel( + file_path=file, + coordinates=poly.get_coordinates, + geometry_type=geometry_type, + out_dir=out_dir, + ) + executor.submit(micro_model.microjson_overlay) + + if preview: + shutil.copy( + pathlib.Path(__file__) + .parents[5] + .joinpath(f"examples/example_overlay_{geometry_type}.json"), + out_dir, + ) + + +if __name__ == "__main__": + app() diff --git 
a/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py b/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py new file mode 100644 index 0000000..6e249c1 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py @@ -0,0 +1,413 @@ +"""Render Overlay.""" +import ast +import logging +import os +from pathlib import Path +from typing import Any +from typing import Optional +from typing import Union + +import filepattern as fp +import microjson.model as mj +import numpy as np +import pydantic +import vaex +from pydantic import root_validator +from pydantic import validator + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") +EXT = (".arrow", ".feather") + + +def convert_vaex_dataframe(file_path: Path) -> vaex.dataframe.DataFrame: + """The vaex reading of tabular data with (".csv", ".feather", ".arrow") format. + + Args: + file_path: Path to tabular data. + + Returns: + A vaex dataframe. + """ + if file_path.name.endswith(".csv"): + return vaex.read_csv(Path(file_path), convert=True, chunk_size=5_000_000) + if file_path.name.endswith(EXT): + return vaex.open(Path(file_path)) + return None + + +class CustomOverlayModel(pydantic.BaseModel): + """Setting up configuration for pydantic base model.""" + + class Config: + """Model configuration.""" + + extra = "allow" + allow_population_by_field_name = True + + +class Validator(CustomOverlayModel): + """Validate stiching vector path and stiching pattern fields. + + This validates values passed for stitch_path and stitch_pattern attributes. + + Args: + stitch_path: Path to the stitching vector, containing x and y image positions. + stitch_pattern: Pattern to parse image filenames in stitching vector. 
+ + Returns: + Attribute values + + """ + + stitch_path: str + stitch_pattern: str + + @root_validator(pre=True) + def validate_stitch_path(cls, values: dict) -> dict: # noqa: N805 + """Validate stitch path and stitch pattern.""" + stitch_path = values.get("stitch_path") + stitch_pattern = values.get("stitch_pattern") + if stitch_path is not None and not Path(stitch_path).exists(): + msg = "Stitching path does not exists!! Please do check path again" + raise ValueError(msg) + if stitch_path is not None and Path(stitch_path).exists(): + with Path.open(Path(stitch_path)) as f: + line = f.readlines() + if line is None: + msg = ( + "Stitching vector is empty so grid positions cannot be defined" + ) + raise ValueError(msg) + if stitch_path is not None and Path(stitch_path).exists(): + files = fp.FilePattern(stitch_path, stitch_pattern) + if len(files) == 0: + msg = "Define stitch pattern again!!! as it is unable to parse file" + raise ValueError(msg) + + return values + + +class PolygonSpec(Validator): + """Polygon is a two-dimensional planar shape with straight sides. + + This generates rectangular polygon coordinates from (x, y) coordinate positions. + + Args: + stitch_path: Path to the stitching vector, containing x and y image positions. + stitch_pattern: Pattern to parse image filenames in stitching vector. + group_by: Variable to group image filenames in stitching vector. + + Returns: + A list of a list of tuples of rectangular polygon coordinates. 
+ + """ + + stitch_path: str + stitch_pattern: str + group_by: Optional[str] = None + + @property + def get_coordinates(self) -> list[Any]: + """Generate rectangular polygon coordinates.""" + files = fp.FilePattern(self.stitch_path, self.stitch_pattern) + self.group_by = None if self.group_by == "None" else self.group_by + + if self.group_by is not None: + var_list = files.get_unique_values() + var_dict = {k: len(v) for k, v in var_list.items() if k == self.group_by} + gp_value = var_dict[self.group_by] + gp_dict = {self.group_by: gp_value} + + coordinates = [] + for i, matching in enumerate(files.get_matching(**gp_dict)): + if i == 0: + cell_width = matching[0]["posX"] + x, y = matching[0]["posX"], matching[0]["posY"] + pos1 = [x, y] + pos2 = [x + cell_width, y] + pos3 = [x + cell_width, y + cell_width] + pos4 = [x, y + cell_width] + pos5 = [x, y] + poly = str([[pos1, pos2, pos3, pos4, pos5]]) + if gp_value: + poly = np.repeat(str(poly), gp_value) + coordinates.append(poly) + coordinates = np.concatenate(coordinates).ravel().tolist() + else: + coordinates = [] + cell_width = list(files())[1][0]["posX"] + for _, file in enumerate(files()): + x, y = file[0]["posX"], file[0]["posY"] + pos1 = [x, y] + pos2 = [x + cell_width, y] + pos3 = [x + cell_width, y + cell_width] + pos4 = [x, y + cell_width] + pos5 = [x, y] + poly = str([[pos1, pos2, pos3, pos4, pos5]]) + coordinates.append(poly) + + mapped_coordinates = [] + for file, cor in zip(files(), coordinates): + filename = str(file[1][0]) + coord_dict = {"file": filename, "coordinates": cor} + mapped_coordinates.append(coord_dict) + + return mapped_coordinates + + +class PointSpec(Validator): + """Polygon is a two-dimensional planar shape with straight sides. + + This generates rectangular polygon coordinates from (x, y) coordinate positions. + + Args: + stitch_path: Path to the stitching vector, containing x and y image positions. + stitch_pattern: Pattern to parse image filenames in stitching vector. 
+ group_by: Variable to group image filenames in stitching vector. + + Returns: + A list of tuples of centroids of a rectangular polygon.. + + """ + + stitch_path: str + stitch_pattern: str + group_by: Optional[str] = None + + @property + def get_coordinates(self) -> list[Any]: + """Generate rectangular polygon coordinates.""" + files = fp.FilePattern(self.stitch_path, self.stitch_pattern) + self.group_by = None if self.group_by == "None" else self.group_by + + if self.group_by is not None: + var_list = files.get_unique_values() + var_dict = {k: len(v) for k, v in var_list.items() if k == self.group_by} + gp_value = var_dict[self.group_by] + gp_dict = {self.group_by: gp_value} + + coordinates = [] + for i, matching in enumerate(files.get_matching(**gp_dict)): + if i == 0: + cell_width = matching[0]["posY"] + x, y = matching[0]["posX"], matching[0]["posY"] + x1 = x + y1 = y + cell_width + x2 = x + cell_width + y2 = y + position = ((x1 + x2) / 2, (y1 + y2) / 2) + if gp_value: + poly = np.repeat(str(position), gp_value) + coordinates.append(poly) + coordinates = np.concatenate(coordinates).ravel().tolist() + + else: + coordinates = [] + cell_width = list(files())[1][0]["posX"] + for _, file in enumerate(files()): + x, y = file[0]["posX"], file[0]["posY"] + x1 = x + y1 = y + cell_width + x2 = x + cell_width + y2 = y + position = ((x1 + x2) / 2, (y1 + y2) / 2) + coordinates.append(position) + + mapped_coordinates = [] + for file, cor in zip(files(), coordinates): + filename = str(file[1][0]) + coord_dict = {"file": filename, "coordinates": cor} + mapped_coordinates.append(coord_dict) + + return mapped_coordinates + + +class ValidatedProperties(mj.Properties): + """Properties with validation.""" + + @validator("string", pre=True, each_item=True) + def validate_str( + cls, + v: Union[str, None], + ) -> str: # noqa: N805 + """Validate string.""" + if v is None: + return "" + return v + + @validator("numeric", pre=True, each_item=True) + def validate_num( + cls, + v: 
Union[int, None], + ) -> Union[int, None]: # noqa: N805 + """Validate numeric.""" + if v is None: + return np.nan + return v + + +class RenderOverlayModel(CustomOverlayModel): + """Generate JSON overlays using microjson python package. + + Args: + file_path: Path to input file. + coordinates: List of geometry coordinates. + geometry_type: Type of geometry (Polygon, Points, bbbox). + out_dir: Path to output directory. + """ + + file_path: Path + coordinates: list[Any] + geometry_type: str + out_dir: Path + + @pydantic.validator("file_path", pre=True) + def validate_file_path(cls, value: Path) -> Path: # noqa: N805 + """Validate file path.""" + if not Path(value).exists(): + msg = "File path does not exists!! Please do check path again" + raise ValueError(msg) + if ( + Path(value).exists() + and not Path(value).name.startswith(".") + and Path(value).name.endswith(".csv") + ): + data = vaex.read_csv(Path(value)) + if data.shape[0] | data.shape[1] == 0: + msg = "data doesnot exists" + raise ValueError(msg) + + elif ( + Path(value).exists() + and not Path(value).name.startswith(".") + and Path(value).name.endswith(EXT) + ): + data = vaex.open(Path(value)) + if data.shape[0] | data.shape[1] == 0: + msg = "data doesnot exists" + raise ValueError(msg) + + return value + + @property + def microjson_overlay(self) -> None: + """Create microjson overlays in JSON Format.""" + if self.file_path.name.endswith((".csv", ".feather", ".arrow")): + data = convert_vaex_dataframe(self.file_path) + des_columns = [ + feature + for feature in data.get_column_names() + if data.data_type(feature) == str + ] + + int_columns = [ + feature + for feature in data.get_column_names() + if data.data_type(feature) == int or data.data_type(feature) == float + ] + + if len(int_columns) == 0: + msg = "Features with integer datatype do not exist" + raise ValueError(msg) + + if len(des_columns) == 0: + msg = "Descriptive features do not exist" + raise ValueError(msg) + + data["geometry_type"] = 
np.repeat(self.geometry_type, data.shape[0]) + data["type"] = np.repeat("Feature", data.shape[0]) + + excolumns = ["geometry_type", "type"] + + des_columns = [col for col in des_columns if col not in excolumns] + + features: list[mj.Feature] = [] + + for d, cor in zip(data.iterrows(), self.coordinates): + _, row = d + if row["intensity_image"] == cor["file"]: + desc = [{key: row[key]} for key in des_columns] + nume = [{key: row[key]} for key in int_columns] + + descriptive_dict = {} + for sub_dict in desc: + descriptive_dict.update(sub_dict) + + numeric_dict = {} + for sub_dict in nume: + numeric_dict.update(sub_dict) + + GeometryClass = getattr(mj, row["geometry_type"]) # noqa: N806 + cor_value = ast.literal_eval(cor["coordinates"]) + geometry = GeometryClass( + type=row["geometry_type"], + coordinates=cor_value, + ) + + # create a new properties object dynamically + properties = ValidatedProperties( + string=descriptive_dict, + numeric=numeric_dict, + ) + + # Create a new Feature object + feature = mj.MicroFeature( + type=row["type"], + geometry=geometry, + properties=properties, + ) + features.append(feature) + + valrange = [ + {i: {"min": data[i].min(), "max": data[i].max()}} for i in int_columns + ] + valrange_dict = {} + for sub_dict in valrange: + valrange_dict.update(sub_dict) + + # Create a list of descriptive fields + descriptive_fields = des_columns + + # Create a new FeatureCollection object + feature_collection = mj.MicroFeatureCollection( + type="FeatureCollection", + features=features, + value_range=valrange_dict, + descriptive_fields=descriptive_fields, + coordinatesystem={ + "axes": [ + { + "name": "x", + "unit": "micrometer", + "type": "cartesian", + "pixelsPerUnit": 1, + "description": "x-axis", + }, + { + "name": "y", + "unit": "micrometer", + "type": "cartesian", + "pixelsPerUnit": 1, + "description": "y-axis", + }, + ], + "origo": "top-left", + }, + ) + + if len(feature_collection.model_dump_json()) == 0: + msg = "JSON file is empty" + raise 
ValueError(msg) + if len(feature_collection.model_dump_json()) > 0: + out_name = Path(self.out_dir, f"{self.file_path.stem}_overlay.json") + with Path.open(out_name, "w") as f: + f.write( + feature_collection.model_dump_json( + indent=2, + exclude_unset=True, + ), + ) + logger.info(f"Saving overlay json file: {out_name}") diff --git a/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl b/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl new file mode 100644 index 0000000..2e2d5c4 --- /dev/null +++ b/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl @@ -0,0 +1,44 @@ +class: CommandLineTool +cwlVersion: v1.2 +inputs: + filePattern: + inputBinding: + prefix: --filePattern + type: string? + geometryType: + inputBinding: + prefix: --geometryType + type: string? + groupBy: + inputBinding: + prefix: --groupBy + type: string? + inpDir: + inputBinding: + prefix: --inpDir + type: Directory + outDir: + inputBinding: + prefix: --outDir + type: Directory + stitchDir: + inputBinding: + prefix: --stitchDir + type: Directory + stitchPattern: + inputBinding: + prefix: --stitchPattern + type: string +outputs: + outDir: + outputBinding: + glob: $(inputs.outDir.basename) + type: Directory +requirements: + DockerRequirement: + dockerPull: polusai/tabular-to-microjson-tool:0.1.2-dev0 + InitialWorkDirRequirement: + listing: + - entry: $(inputs.outDir) + writable: true + InlineJavascriptRequirement: {} diff --git a/visualization/tabular-to-microjson-tool/tests/__init__.py b/visualization/tabular-to-microjson-tool/tests/__init__.py new file mode 100644 index 0000000..c02efbd --- /dev/null +++ b/visualization/tabular-to-microjson-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Test for tabular to microjson package.""" diff --git a/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py b/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py new file mode 100644 index 0000000..7e8d7d8 --- /dev/null +++ 
b/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py @@ -0,0 +1,244 @@ +"""Test for tabular to microjson package.""" +import json +import pathlib +import shutil +import string +import tempfile + +import numpy as np +import pandas as pd +import pytest +import vaex +from polus.images.visualization.tabular_to_microjson import microjson_overlay as mo +from polus.images.visualization.tabular_to_microjson.__main__ import app +from typer.testing import CliRunner + +runner = CliRunner() + + +@pytest.fixture() +def output_directory() -> pathlib.Path: + """Generate output directory.""" + return pathlib.Path(tempfile.mkdtemp(dir=pathlib.Path.cwd())) + + +@pytest.fixture() +def input_directory() -> pathlib.Path: + """Generate output directory.""" + return pathlib.Path(tempfile.mkdtemp(dir=pathlib.Path.cwd())) + + +def clean_directories() -> None: + """Remove all temporary directories.""" + for d in pathlib.Path(".").cwd().iterdir(): + if d.is_dir() and d.name.startswith("tmp"): + shutil.rmtree(d) + + +@pytest.fixture( + params=[ + (384, 2170, "Polygon", ".csv"), + ], +) +def get_params(request: pytest.FixtureRequest) -> tuple[int, int, str, str]: + """To get the parameter of the fixture.""" + return request.param + + +@pytest.fixture() +def generate_synthetic_data( + input_directory: pathlib.Path, + get_params: tuple[int, int, str, str], +) -> tuple[pathlib.Path, pathlib.Path]: + """Generate tabular data.""" + nrows, cell_width, geometry_type, file_extension = get_params + n = int(nrows / 384) + + rng = np.random.default_rng(42) + + pathlib.Path.mkdir(pathlib.Path(input_directory, "data")) + pathlib.Path.mkdir(pathlib.Path(input_directory, "stvector")) + + flist = [] + for x in range(16): + for y in range(24): + for p in range(n): + fname = ( + f"x{x}".zfill(2) + + f"_y{y}".zfill(2) + + f"_p{p}".zfill(2) + + "_c1.ome.tif" + ) + flist.append(fname) + position = (y * cell_width, x * cell_width) + stvector = ( + f"file: {fname}; corr: 0; position: {position}; 
grid: {(y, x)};" + ) + stitch_path = pathlib.Path(input_directory, "stvector/data.txt") + with pathlib.Path.open(stitch_path, "a") as file: + file.write(f"{stvector}\n") + file.close() + diction_1 = { + "intensity_image": flist, + "Plate": np.repeat("preZ", nrows).tolist(), + "Well": [ + f"{s}{num}" + for s in string.ascii_letters.upper()[:16] + for num in range(24) + for p in range(n) + ], + "Characteristics [Organism 2]": np.repeat( + "Herpes simplex virus type 1", + nrows, + ).tolist(), + "Characteristics [Cell Line]": np.repeat("A549", nrows).tolist(), + "Compound Name": [rng.choice(["DMSO", "Ganciclovir"]) for i in range(nrows)], + "Control Type": [ + rng.choice(["negative control", "positive control"]) for i in range(nrows) + ], + "numberOfNuclei": rng.integers( + low=2500, + high=100000, + size=nrows, + ), + "maxVirusIntensity": rng.integers( + low=500, + high=30000, + size=nrows, + ), + } + + df = pd.DataFrame(diction_1) + if file_extension == ".csv": + outpath = pathlib.Path(input_directory, "data/data.csv") + df.to_csv(outpath, index=False) + if file_extension == ".feather": + outpath = pathlib.Path(input_directory, "data/data.feather") + df.to_feather(outpath) + if file_extension == ".arrow": + outpath = pathlib.Path(input_directory, "data/data.arrow") + df.to_feather(outpath) + + return outpath, stitch_path + + +def test_convert_vaex_dataframe( + generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], +) -> None: + """Converting tabular data to vaex dataframe.""" + outpath, _ = generate_synthetic_data + vaex_df = mo.convert_vaex_dataframe(outpath) + assert type(vaex_df) == vaex.dataframe.DataFrameLocal + assert len(list(vaex_df.columns)) != 0 + assert vaex_df.shape[0] > 0 + clean_directories() + + +def test_generate_polygon_coordinates( + generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], +) -> None: + """Test generating polygon coordinates using stitching vector.""" + _, stitch_dir = generate_synthetic_data + stitch_pattern = 
"x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" + group_by = None + + model = mo.PolygonSpec( + stitch_path=str(stitch_dir), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + poly = model.get_coordinates + assert all(len(i) for p in poly[0]["coordinates"] for i in p) is True + clean_directories() + + +def test_generate_rectangular_polygon_centroids( + generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], +) -> None: + """Test generating centroid rectangular coordinates using stitching vector.""" + _, stitch_dir = generate_synthetic_data + stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" + group_by = None + model = mo.PointSpec( + stitch_path=str(stitch_dir), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + poly = model.get_coordinates + expected_len = 2 + assert len(poly[0]["coordinates"]) == expected_len + clean_directories() + + +def test_render_overlay_model( + generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], + output_directory: pathlib.Path, + get_params: tuple[int, int, str, str], +) -> None: + """Test render overlay model.""" + inp_dir, stitch_dir = generate_synthetic_data + stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" + _, _, geometry_type, _ = get_params + group_by = None + + if geometry_type == "Polygon": + model = mo.PolygonSpec( + stitch_path=str(stitch_dir), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + + if geometry_type == "Point": + model = mo.PointSpec( + stitch_path=str(stitch_dir), + stitch_pattern=stitch_pattern, + group_by=group_by, + ) + poly = model.get_coordinates + + microjson = mo.RenderOverlayModel( + file_path=inp_dir, + coordinates=poly, + geometry_type=geometry_type, + out_dir=output_directory, + ) + mjson = microjson.microjson_overlay + out_file = pathlib.Path(output_directory, "data_overlay.json") + with pathlib.Path.open(out_file) as jfile: + mjson = json.load(jfile) + assert len(mjson) != 0 + clean_directories() + + +def test_cli( + generate_synthetic_data: 
tuple[pathlib.Path, pathlib.Path], + output_directory: pathlib.Path, + get_params: tuple[int, int, str, str], +) -> None: + """Test Cli.""" + inp_dir, stitch_dir = generate_synthetic_data + + stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" + _, _, geometry_type, _ = get_params + + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir.parent, + "--stitchDir", + stitch_dir.parent, + "--filePattern", + ".+", + "--stitchPattern", + stitch_pattern, + "--groupBy", + None, + "--geometryType", + geometry_type, + "--outDir", + pathlib.Path(output_directory), + ], + ) + assert result.exit_code == 0 + clean_directories() From 4af00d95118ec16bd37503b46c8826fde0e2ecc3 Mon Sep 17 00:00:00 2001 From: hamshkhawar Date: Tue, 9 Apr 2024 08:17:04 -0500 Subject: [PATCH 2/6] updated plugins and fixed tests --- VERSION | 2 +- .../feature-subsetting-tool}/.bumpversion.cfg | 5 +- .../feature-subsetting-tool}/Dockerfile | 9 +- .../README.md | 52 +- clustering/feature-subsetting-tool/VERSION | 1 + .../feature-subsetting-tool/build-docker.sh | 4 + .../example/summary.txt | 14 + .../package-release.sh | 6 +- .../feature-subsetting-tool/plugin.json | 173 + .../feature-subsetting-tool/pyproject.toml | 30 + .../feature-subsetting-tool/run-docker.sh | 32 + .../clustering/feature_subsetting/__init__.py | 3 + .../clustering/feature_subsetting/__main__.py | 155 + .../feature_subsetting/feature_subset.py | 300 + .../feature-subsetting-tool/tests/__init__.py | 1 + .../feature-subsetting-tool/tests/conftest.py | 58 + .../feature-subsetting-tool/tests/test_cli.py | 92 + .../tests/test_feature_subsetting.py | 72 + .../hdbscan-clustering-tool}/.bumpversion.cfg | 5 +- clustering/hdbscan-clustering-tool/.gitignore | 23 + .../hdbscan-clustering-tool}/Dockerfile | 7 +- .../README.md | 28 +- clustering/hdbscan-clustering-tool/VERSION | 1 + .../hdbscan-clustering-tool/build-docker.sh | 4 + .../package-release.sh | 5 +- .../hdbscan-clustering-tool/plugin.json | 123 + 
.../hdbscan-clustering-tool/pyproject.toml | 32 + .../hdbscan-clustering-tool/run-docker.sh | 23 + .../clustering/hdbscan_clustering/__init__.py | 4 + .../clustering/hdbscan_clustering/__main__.py | 156 + .../hdbscan_clustering/hdbscan_clustering.py | 150 + .../hdbscan-clustering-tool/tests/__init__.py | 1 + .../hdbscan-clustering-tool/tests/conftest.py | 48 + .../hdbscan-clustering-tool/tests/test_cli.py | 74 + .../tests/test_hdbscan_clustering.py | 49 + .../Dockerfile | 24 - .../polus-feature-subsetting-plugin/VERSION | 1 - .../build-docker.sh | 4 - .../featuresubsetting.cwl | 60 - .../polus-feature-subsetting-plugin/ict.yaml | 123 - .../plugin.json | 139 - .../src/main.py | 288 - .../src/requirements.txt | 2 - .../Dockerfile | 10 - .../polus-hdbscan-clustering-plugin/VERSION | 1 - .../build-docker.sh | 4 - .../hdbscanclustering.cwl | 44 - .../polus-hdbscan-clustering-plugin/ict.yaml | 82 - .../plugin.json | 89 - .../run-docker.sh | 25 - .../src/main.py | 176 - .../src/requirements.txt | 2 - .../.bumpversion.cfg | 8 +- .../feature-segmentation-eval-tool/Dockerfile | 4 +- .../feature-segmentation-eval-tool/README.md | 2 +- .../feature-segmentation-eval-tool/VERSION | 2 +- .../feature-segmentation-eval-tool/ict.yaml | 6 +- .../plugin.json | 10 +- .../pyproject.toml | 7 +- .../feature_segmentation_eval/__init__.py | 2 +- .../feature_segmentation_eval/__main__.py | 4 +- .../feature_evaluation.py | 0 .../feature_segmentation_eval/metrics.py | 0 .../tests/test_cli.py | 2 +- .../tests/test_feature_single.py | 2 +- .../arrow-to-tabular-tool/.bumpversion.cfg | 27 - formats/arrow-to-tabular-tool/.gitignore | 175 - formats/arrow-to-tabular-tool/README.md | 29 - formats/arrow-to-tabular-tool/VERSION | 1 - .../arrow-to-tabular-tool/arrowtotabular.cwl | 28 - formats/arrow-to-tabular-tool/build-docker.sh | 4 - formats/arrow-to-tabular-tool/ict.yaml | 45 - formats/arrow-to-tabular-tool/plugin.json | 59 - formats/arrow-to-tabular-tool/pyproject.toml | 32 - 
formats/arrow-to-tabular-tool/run-plugin.sh | 25 - .../formats/arrow_to_tabular/__init__.py | 4 - .../formats/arrow_to_tabular/__main__.py | 111 - .../arrow_to_tabular/arrow_to_tabular.py | 53 - .../arrow-to-tabular-tool/tests/__init__.py | 1 - .../arrow-to-tabular-tool/tests/test_main.py | 69 - .../Dockerfile | 8 - .../README.md | 31 - .../polus-fcs-to-csv-converter-plugin/VERSION | 1 - .../build-docker.sh | 4 - .../fcstocsvfileconverter.cwl | 24 - .../ict.yaml | 30 - .../plugin.json | 34 - .../src/main.py | 78 - .../src/requirements.txt | 1 - .../tabular-converter-tool/.bumpversion.cfg | 8 +- formats/tabular-converter-tool/Dockerfile | 4 +- formats/tabular-converter-tool/README.md | 2 +- formats/tabular-converter-tool/VERSION | 2 +- formats/tabular-converter-tool/ict.yaml | 8 +- formats/tabular-converter-tool/plugin.json | 10 +- formats/tabular-converter-tool/pyproject.toml | 7 +- .../formats/tabular_converter/__init__.py | 2 +- .../formats/tabular_converter/__main__.py | 4 +- .../tabular_converter/tabular_converter.py | 0 .../tabularconverter.cwl | 2 +- .../tabular-converter-tool/tests/test_main.py | 2 +- formats/tabular-to-arrow-tool/.gitignore | 175 - formats/tabular-to-arrow-tool/Dockerfile | 20 - formats/tabular-to-arrow-tool/README.md | 34 - formats/tabular-to-arrow-tool/VERSION | 1 - formats/tabular-to-arrow-tool/build-docker.sh | 4 - formats/tabular-to-arrow-tool/ict.yaml | 48 - formats/tabular-to-arrow-tool/plugin.json | 60 - formats/tabular-to-arrow-tool/pyproject.toml | 33 - formats/tabular-to-arrow-tool/run-plugin.sh | 24 - .../formats/tabular_to_arrow/__init__.py | 7 - .../formats/tabular_to_arrow/__main__.py | 98 - .../tabular_arrow_converter.py | 131 - .../tabular-to-arrow-tool/tests/__init__.py | 1 - .../tabular-to-arrow-tool/tests/test_main.py | 138 - package.json | 8 +- pyproject.toml | 4 +- ruff.toml | 10 +- src/polus/tabular/__init__.py | 65 + src/polus/tabular/_plugins/VERSION | 1 + src/polus/tabular/_plugins/__init__.py | 0 
src/polus/tabular/_plugins/_compat.py | 4 + .../tabular/_plugins/classes/__init__.py | 27 + .../tabular/_plugins/classes/plugin_base.py | 311 + .../_plugins/classes/plugin_classes.py | 472 + src/polus/tabular/_plugins/cwl/__init__.py | 3 + src/polus/tabular/_plugins/cwl/base.cwl | 17 + src/polus/tabular/_plugins/cwl/cwl.py | 7 + src/polus/tabular/_plugins/gh.py | 65 + src/polus/tabular/_plugins/io/__init__.py | 21 + src/polus/tabular/_plugins/io/_io.py | 597 + .../tabular/_plugins/manifests/__init__.py | 15 + .../_plugins/manifests/manifest_utils.py | 210 + .../_plugins/models/PolusComputeSchema.json | 499 + .../_plugins/models/PolusComputeSchema.ts | 102 + src/polus/tabular/_plugins/models/__init__.py | 35 + .../models/pydanticv1/PolusComputeSchema.py | 137 + .../models/pydanticv1/WIPPPluginSchema.py | 233 + .../_plugins/models/pydanticv1/__init__.py | 0 .../_plugins/models/pydanticv1/compute.py | 28 + .../_plugins/models/pydanticv1/wipp.py | 79 + .../models/pydanticv2/PolusComputeSchema.py | 136 + .../models/pydanticv2/WIPPPluginSchema.py | 241 + .../_plugins/models/pydanticv2/__init__.py | 0 .../_plugins/models/pydanticv2/compute.py | 28 + .../_plugins/models/pydanticv2/wipp.py | 79 + .../models/wipp-plugin-manifest-schema.json | 726 + src/polus/tabular/_plugins/registry.py | 280 + src/polus/tabular/_plugins/registry_utils.py | 135 + src/polus/tabular/_plugins/update/__init__.py | 6 + src/polus/tabular/_plugins/update/_update.py | 116 + src/polus/tabular/_plugins/utils.py | 17 + tests/__init__.py | 1 + tests/resources/b1.json | 77 + tests/resources/b2.json | 76 + tests/resources/b3.json | 76 + tests/resources/g1.json | 78 + tests/resources/g2.json | 77 + tests/resources/g3.json | 77 + tests/resources/omeconverter022.json | 45 + tests/resources/tabularconverter.json | 75 + .../resources/target1.cwl | 8 +- tests/test_cwl.py | 105 + tests/test_io.py | 69 + tests/test_manifests.py | 236 + tests/test_plugins.py | 198 + tests/test_version.py | 171 + 
.../tabular-merger-tool/.bumpversion.cfg | 6 +- transforms/tabular-merger-tool/Dockerfile | 5 +- transforms/tabular-merger-tool/README.md | 2 +- transforms/tabular-merger-tool/VERSION | 2 +- transforms/tabular-merger-tool/plugin.json | 10 +- transforms/tabular-merger-tool/pyproject.toml | 8 +- .../transforms}/tabular_merger/__init__.py | 2 +- .../transforms}/tabular_merger/__main__.py | 4 +- .../tabular_merger/tabular_merger.py | 0 .../tabular-merger-tool/tests/test_main.py | 2 +- .../.bumpversion.cfg | 10 +- .../tabular-thresholding-tool/Dockerfile | 4 +- .../tabular-thresholding-tool/README.md | 2 +- transforms/tabular-thresholding-tool/VERSION | 2 +- transforms/tabular-thresholding-tool/ict.yaml | 8 +- .../tabular-thresholding-tool/plugin.json | 10 +- .../tabular-thresholding-tool/pyproject.toml | 8 +- .../tabular_thresholding/__init__.py | 2 +- .../tabular_thresholding/__main__.py | 4 +- .../tabular_thresholding.py | 2 +- .../thresholding/__init__.py | 0 .../thresholding/custom_fpr.py | 0 .../thresholding/n_sigma.py | 0 .../tabular_thresholding/thresholding/otsu.py | 0 .../tabular-thresholding-plugin.cwl | 2 +- .../tests/test_main.py | 2 +- utils/polus-python-template/.bumpversion.cfg | 2 +- utils/polus-python-template/CHANGELOG.md | 5 + utils/polus-python-template/README.md | 12 +- .../hooks/post_gen_project.py | 8 +- .../hooks/pre_gen_project.py | 12 +- utils/polus-python-template/pyproject.toml | 2 +- .../Dockerfile | 2 +- .../plugin.json | 116 +- .../__main__.py | 2 +- .../ict.yaml | 2 +- utils/rxiv-download-tool/.bumpversion.cfg | 8 +- utils/rxiv-download-tool/Dockerfile | 2 +- utils/rxiv-download-tool/README.md | 4 +- utils/rxiv-download-tool/VERSION | 2 +- .../downloadrxivtextdata.cwl | 2 +- utils/rxiv-download-tool/ict.yaml | 8 +- utils/rxiv-download-tool/plugin.json | 10 +- utils/rxiv-download-tool/pyproject.toml | 4 +- .../utils/rxiv_download/__init__.py | 2 +- .../utils/rxiv_download/__main__.py | 6 +- .../utils/rxiv_download/fetch.py | 0 
utils/rxiv-download-tool/tests/test_cli.py | 2 +- utils/rxiv-download-tool/tests/test_fetch.py | 2 +- .../ict.yaml | 2 +- .../plugin.json | 4 +- .../tabular-to-microjson-tool/README.md | 62 - .../tabular-to-microjson-tool/VERSION | 1 - .../tabular-to-microjson-tool/build-docker.sh | 4 - .../examples/example_overlay_Point.json | 9255 --------- .../examples/example_overlay_Polygon.json | 16935 ---------------- .../tabular-to-microjson-tool/ict.yaml | 80 - .../tabular-to-microjson-tool/plugin.json | 96 - .../tabular-to-microjson-tool/pyproject.toml | 30 - .../tabular-to-microjson-tool/run-plugin.sh | 28 - .../tabular_to_microjson/__init__.py | 2 - .../tabular_to_microjson/__main__.py | 124 - .../tabular_to_microjson/microjson_overlay.py | 413 - .../tabulartomicrojson.cwl | 44 - .../tests/__init__.py | 1 - .../tests/test_microjson_overlay.py | 244 - 233 files changed, 7977 insertions(+), 30307 deletions(-) rename {visualization/tabular-to-microjson-tool => clustering/feature-subsetting-tool}/.bumpversion.cfg (78%) rename {visualization/tabular-to-microjson-tool => clustering/feature-subsetting-tool}/Dockerfile (77%) rename clustering/{polus-feature-subsetting-plugin => feature-subsetting-tool}/README.md (77%) create mode 100644 clustering/feature-subsetting-tool/VERSION create mode 100644 clustering/feature-subsetting-tool/build-docker.sh create mode 100644 clustering/feature-subsetting-tool/example/summary.txt rename {formats/tabular-to-arrow-tool => clustering/feature-subsetting-tool}/package-release.sh (74%) mode change 100755 => 100644 create mode 100644 clustering/feature-subsetting-tool/plugin.json create mode 100644 clustering/feature-subsetting-tool/pyproject.toml create mode 100644 clustering/feature-subsetting-tool/run-docker.sh create mode 100644 clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/__init__.py create mode 100644 clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/__main__.py create 
mode 100644 clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/feature_subset.py create mode 100644 clustering/feature-subsetting-tool/tests/__init__.py create mode 100644 clustering/feature-subsetting-tool/tests/conftest.py create mode 100644 clustering/feature-subsetting-tool/tests/test_cli.py create mode 100644 clustering/feature-subsetting-tool/tests/test_feature_subsetting.py rename {formats/tabular-to-arrow-tool => clustering/hdbscan-clustering-tool}/.bumpversion.cfg (78%) create mode 100644 clustering/hdbscan-clustering-tool/.gitignore rename {formats/arrow-to-tabular-tool => clustering/hdbscan-clustering-tool}/Dockerfile (68%) rename clustering/{polus-hdbscan-clustering-plugin => hdbscan-clustering-tool}/README.md (67%) create mode 100644 clustering/hdbscan-clustering-tool/VERSION create mode 100755 clustering/hdbscan-clustering-tool/build-docker.sh rename {formats/arrow-to-tabular-tool => clustering/hdbscan-clustering-tool}/package-release.sh (74%) mode change 100755 => 100644 create mode 100644 clustering/hdbscan-clustering-tool/plugin.json create mode 100644 clustering/hdbscan-clustering-tool/pyproject.toml create mode 100755 clustering/hdbscan-clustering-tool/run-docker.sh create mode 100644 clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/__init__.py create mode 100644 clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/__main__.py create mode 100644 clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/hdbscan_clustering.py create mode 100644 clustering/hdbscan-clustering-tool/tests/__init__.py create mode 100644 clustering/hdbscan-clustering-tool/tests/conftest.py create mode 100644 clustering/hdbscan-clustering-tool/tests/test_cli.py create mode 100644 clustering/hdbscan-clustering-tool/tests/test_hdbscan_clustering.py delete mode 100644 clustering/polus-feature-subsetting-plugin/Dockerfile delete mode 100644 
clustering/polus-feature-subsetting-plugin/VERSION delete mode 100644 clustering/polus-feature-subsetting-plugin/build-docker.sh delete mode 100644 clustering/polus-feature-subsetting-plugin/featuresubsetting.cwl delete mode 100644 clustering/polus-feature-subsetting-plugin/ict.yaml delete mode 100644 clustering/polus-feature-subsetting-plugin/plugin.json delete mode 100644 clustering/polus-feature-subsetting-plugin/src/main.py delete mode 100644 clustering/polus-feature-subsetting-plugin/src/requirements.txt delete mode 100644 clustering/polus-hdbscan-clustering-plugin/Dockerfile delete mode 100644 clustering/polus-hdbscan-clustering-plugin/VERSION delete mode 100755 clustering/polus-hdbscan-clustering-plugin/build-docker.sh delete mode 100644 clustering/polus-hdbscan-clustering-plugin/hdbscanclustering.cwl delete mode 100644 clustering/polus-hdbscan-clustering-plugin/ict.yaml delete mode 100644 clustering/polus-hdbscan-clustering-plugin/plugin.json delete mode 100755 clustering/polus-hdbscan-clustering-plugin/run-docker.sh delete mode 100644 clustering/polus-hdbscan-clustering-plugin/src/main.py delete mode 100644 clustering/polus-hdbscan-clustering-plugin/src/requirements.txt rename features/feature-segmentation-eval-tool/src/polus/{images => tabular}/features/feature_segmentation_eval/__init__.py (79%) rename features/feature-segmentation-eval-tool/src/polus/{images => tabular}/features/feature_segmentation_eval/__main__.py (95%) rename features/feature-segmentation-eval-tool/src/polus/{images => tabular}/features/feature_segmentation_eval/feature_evaluation.py (100%) rename features/feature-segmentation-eval-tool/src/polus/{images => tabular}/features/feature_segmentation_eval/metrics.py (100%) delete mode 100644 formats/arrow-to-tabular-tool/.bumpversion.cfg delete mode 100644 formats/arrow-to-tabular-tool/.gitignore delete mode 100644 formats/arrow-to-tabular-tool/README.md delete mode 100644 formats/arrow-to-tabular-tool/VERSION delete mode 100644 
formats/arrow-to-tabular-tool/arrowtotabular.cwl delete mode 100755 formats/arrow-to-tabular-tool/build-docker.sh delete mode 100644 formats/arrow-to-tabular-tool/ict.yaml delete mode 100644 formats/arrow-to-tabular-tool/plugin.json delete mode 100644 formats/arrow-to-tabular-tool/pyproject.toml delete mode 100755 formats/arrow-to-tabular-tool/run-plugin.sh delete mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/__init__.py delete mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/__main__.py delete mode 100644 formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py delete mode 100644 formats/arrow-to-tabular-tool/tests/__init__.py delete mode 100644 formats/arrow-to-tabular-tool/tests/test_main.py delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/Dockerfile delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/README.md delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/VERSION delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/build-docker.sh delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/fcstocsvfileconverter.cwl delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/ict.yaml delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/plugin.json delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/src/main.py delete mode 100644 formats/polus-fcs-to-csv-converter-plugin/src/requirements.txt rename formats/tabular-converter-tool/src/polus/{images => tabular}/formats/tabular_converter/__init__.py (80%) rename formats/tabular-converter-tool/src/polus/{images => tabular}/formats/tabular_converter/__main__.py (95%) rename formats/tabular-converter-tool/src/polus/{images => tabular}/formats/tabular_converter/tabular_converter.py (100%) delete mode 100644 formats/tabular-to-arrow-tool/.gitignore delete mode 100644 formats/tabular-to-arrow-tool/Dockerfile delete mode 100644 
formats/tabular-to-arrow-tool/README.md delete mode 100644 formats/tabular-to-arrow-tool/VERSION delete mode 100755 formats/tabular-to-arrow-tool/build-docker.sh delete mode 100644 formats/tabular-to-arrow-tool/ict.yaml delete mode 100644 formats/tabular-to-arrow-tool/plugin.json delete mode 100644 formats/tabular-to-arrow-tool/pyproject.toml delete mode 100755 formats/tabular-to-arrow-tool/run-plugin.sh delete mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/__init__.py delete mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/__main__.py delete mode 100644 formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py delete mode 100644 formats/tabular-to-arrow-tool/tests/__init__.py delete mode 100644 formats/tabular-to-arrow-tool/tests/test_main.py create mode 100644 src/polus/tabular/__init__.py create mode 100644 src/polus/tabular/_plugins/VERSION create mode 100644 src/polus/tabular/_plugins/__init__.py create mode 100644 src/polus/tabular/_plugins/_compat.py create mode 100644 src/polus/tabular/_plugins/classes/__init__.py create mode 100644 src/polus/tabular/_plugins/classes/plugin_base.py create mode 100644 src/polus/tabular/_plugins/classes/plugin_classes.py create mode 100644 src/polus/tabular/_plugins/cwl/__init__.py create mode 100644 src/polus/tabular/_plugins/cwl/base.cwl create mode 100644 src/polus/tabular/_plugins/cwl/cwl.py create mode 100644 src/polus/tabular/_plugins/gh.py create mode 100644 src/polus/tabular/_plugins/io/__init__.py create mode 100644 src/polus/tabular/_plugins/io/_io.py create mode 100644 src/polus/tabular/_plugins/manifests/__init__.py create mode 100644 src/polus/tabular/_plugins/manifests/manifest_utils.py create mode 100644 src/polus/tabular/_plugins/models/PolusComputeSchema.json create mode 100644 src/polus/tabular/_plugins/models/PolusComputeSchema.ts create mode 100644 src/polus/tabular/_plugins/models/__init__.py 
create mode 100644 src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv1/__init__.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv1/compute.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv1/wipp.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv2/__init__.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv2/compute.py create mode 100644 src/polus/tabular/_plugins/models/pydanticv2/wipp.py create mode 100644 src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json create mode 100644 src/polus/tabular/_plugins/registry.py create mode 100644 src/polus/tabular/_plugins/registry_utils.py create mode 100644 src/polus/tabular/_plugins/update/__init__.py create mode 100644 src/polus/tabular/_plugins/update/_update.py create mode 100644 src/polus/tabular/_plugins/utils.py create mode 100644 tests/__init__.py create mode 100644 tests/resources/b1.json create mode 100644 tests/resources/b2.json create mode 100644 tests/resources/b3.json create mode 100644 tests/resources/g1.json create mode 100644 tests/resources/g2.json create mode 100644 tests/resources/g3.json create mode 100644 tests/resources/omeconverter022.json create mode 100644 tests/resources/tabularconverter.json rename formats/tabular-to-arrow-tool/tabulartoarrow.cwl => tests/resources/target1.cwl (78%) create mode 100644 tests/test_cwl.py create mode 100644 tests/test_io.py create mode 100644 tests/test_manifests.py create mode 100644 tests/test_plugins.py create mode 100644 tests/test_version.py rename transforms/tabular-merger-tool/src/polus/{images/transforms/tabular => 
tabular/transforms}/tabular_merger/__init__.py (65%) rename transforms/tabular-merger-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_merger/__main__.py (95%) rename transforms/tabular-merger-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_merger/tabular_merger.py (100%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/__init__.py (70%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/__main__.py (97%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/tabular_thresholding.py (98%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/thresholding/__init__.py (100%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/thresholding/custom_fpr.py (100%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/thresholding/n_sigma.py (100%) rename transforms/tabular-thresholding-tool/src/polus/{images/transforms/tabular => tabular/transforms}/tabular_thresholding/thresholding/otsu.py (100%) rename utils/rxiv-download-tool/src/polus/{images => tabular}/utils/rxiv_download/__init__.py (51%) rename utils/rxiv-download-tool/src/polus/{images => tabular}/utils/rxiv_download/__main__.py (88%) rename utils/rxiv-download-tool/src/polus/{images => tabular}/utils/rxiv_download/fetch.py (100%) delete mode 100644 visualization/tabular-to-microjson-tool/README.md delete mode 100644 visualization/tabular-to-microjson-tool/VERSION delete mode 100644 visualization/tabular-to-microjson-tool/build-docker.sh delete mode 100644 visualization/tabular-to-microjson-tool/examples/example_overlay_Point.json delete 
mode 100644 visualization/tabular-to-microjson-tool/examples/example_overlay_Polygon.json delete mode 100644 visualization/tabular-to-microjson-tool/ict.yaml delete mode 100644 visualization/tabular-to-microjson-tool/plugin.json delete mode 100644 visualization/tabular-to-microjson-tool/pyproject.toml delete mode 100644 visualization/tabular-to-microjson-tool/run-plugin.sh delete mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/__init__.py delete mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/__main__.py delete mode 100644 visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py delete mode 100644 visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl delete mode 100644 visualization/tabular-to-microjson-tool/tests/__init__.py delete mode 100644 visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py diff --git a/VERSION b/VERSION index 6e8bf73..17e51c3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.1.0 +0.1.1 diff --git a/visualization/tabular-to-microjson-tool/.bumpversion.cfg b/clustering/feature-subsetting-tool/.bumpversion.cfg similarity index 78% rename from visualization/tabular-to-microjson-tool/.bumpversion.cfg rename to clustering/feature-subsetting-tool/.bumpversion.cfg index 94c23e6..e576e44 100644 --- a/visualization/tabular-to-microjson-tool/.bumpversion.cfg +++ b/clustering/feature-subsetting-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.2-dev0 +current_version = 0.2.1-dev0 commit = True tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
@@ -21,7 +21,8 @@ search = version = "{current_version}" replace = version = "{new_version}" [bumpversion:file:plugin.json] +[bumpversion:file:README.md] [bumpversion:file:VERSION] -[bumpversion:file:src/polus/images/visualization/tabular_to_microjson/__init__.py] +[bumpversion:file:src/polus/tabular/clustering/feature_subsetting/__init__.py] diff --git a/visualization/tabular-to-microjson-tool/Dockerfile b/clustering/feature-subsetting-tool/Dockerfile similarity index 77% rename from visualization/tabular-to-microjson-tool/Dockerfile rename to clustering/feature-subsetting-tool/Dockerfile index 4b00db3..cb19bc8 100644 --- a/visualization/tabular-to-microjson-tool/Dockerfile +++ b/clustering/feature-subsetting-tool/Dockerfile @@ -1,11 +1,10 @@ -#!/bin/bash -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" -ENV POLUS_LOG="INFO" ENV POLUS_IMG_EXT=".ome.tif" ENV POLUS_TAB_EXT=".csv" +ENV POLUS_LOG="INFO" # Work directory defined in the base container WORKDIR ${EXEC_DIR} @@ -14,7 +13,9 @@ COPY pyproject.toml ${EXEC_DIR} COPY VERSION ${EXEC_DIR} COPY README.md ${EXEC_DIR} COPY src ${EXEC_DIR}/src + RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.visualization.tabular_to_microjson"] + +ENTRYPOINT ["python3", "-m", "polus.tabular.clustering.feature_subsetting"] CMD ["--help"] diff --git a/clustering/polus-feature-subsetting-plugin/README.md b/clustering/feature-subsetting-tool/README.md similarity index 77% rename from clustering/polus-feature-subsetting-plugin/README.md rename to clustering/feature-subsetting-tool/README.md index 24ccba6..7bc8231 100644 --- a/clustering/polus-feature-subsetting-plugin/README.md +++ b/clustering/feature-subsetting-tool/README.md @@ -1,24 +1,25 @@ -# Feature Data Subset +# Feature Data Subset(v0.2.1-dev0) -This WIPP plugin subsets data based on a given feature. 
It works in conjunction with the `polus-feature-extraction-plugin`, where the feature extraction plugin can be used to extract the features such as the mean intensity of every image in the input image collection. +This WIPP plugin subsets data based on a given feature. It works in conjunction with the `polus-feature-extraction-plugin`, where the feature extraction plugin can be used to extract the features such as the mean intensity of every image in the input image collection. # Usage -The details and usage of the plugin inputs is provided in the section below. In addition to the subsetted data, the output directory also consists of a `summary.txt` file which has information as to what images were kept and their new filename if they were renamed. - -### Explanation of inputs -Some of the inputs are pretty straighforward and are used commonly across most WIPP plugins. This section is used to provide some details and examples of the inputs that may be a little complicated. The image collection with the following pattern will be used as an example : `r{r+}_t{t+}_p{p+}_z{z+}_c{c+}.ome.tif`, where r,t,p,z,c stand for replicate, timepoint, positon,z-positon, and channel respectively. Consider we have 5 replicates, 3 timepoints, 50 positions, 10 z-planes and 4 channels. - -1. `inpDir` - This contains the path to the input image collection to subset data from. -2. `filePattern` - Filepattern of the input images -3. `groupVar` - This is a mandatory input across which to subset data. This can take either 1 or 2 variables as input and if 2 variables are provided then the second variable will be treated as the minor grouping variable. In our example, if the `z` is provided as input, then within a subcollection, the mean of the feature value will be taken for all images with the same z. Then the z positions will be filtered out based on the input of `percentile` and `removeDirection` variables. 
Now if `z,c` are provided as input, then 'c' will be treated as the minor grouping variable which means that the mean will be taken for all images with the same z for each channel. Also, the plugin will ensures that the same values of z positions are filtered out across c. -4. `csvDir` - This contains the path to the csv collection containing the feature values for each image. This can be the output of the feature extraction plugin. -5. `feature` - The column name from the csv file that will be used to filter images -6. `percentile` and `removeDirection` - These two variables denote the critieria with which images are filtered. For example, if percentile is `0.1` and removeDirection is set to `Below` then images with feature value below the 10th percentile will be removed. On the other hand, if removeDirection is set to above then all images with feature value greater than the 10th pecentile will be removed. This enables data subsetting from both `brighfield` and `darkfield` microscopy images. - - **Optional Arguments** - +The details and usage of the plugin inputs is provided in the section below. In addition to the subsetted data, the output directory also consists of a `summary.txt` file which has information as to what images were kept and their new filename if they were renamed. + +### Explanation of inputs +Some of the inputs are pretty straighforward and are used commonly across most WIPP plugins. This section is used to provide some details and examples of the inputs that may be a little complicated. The image collection with the following pattern will be used as an example : `r{r+}_t{t+}_p{p+}_z{z+}_c{c+}.ome.tif`, where r,t,p,z,c stand for replicate, timepoint, positon,z-positon, and channel respectively. Consider we have 5 replicates, 3 timepoints, 50 positions, 10 z-planes and 4 channels. + +1. `inpDir` - This contains the path to the input image collection to subset data from. +2. 
`tabularDir` This contains the path to the tabular files with file formats (`.csv`, `.arrow`, `.parquet`) containing the feature values for each image. This can be the output of the feature extraction or nyxus plugin +3. `filePattern` - Filepattern of the input images +4. `imageFeature` - Tabular data featuring image filenames +5. `tabularFeature` - Tabular feature that will be used to filter images +6. `groupVar` - This is a mandatory input across which to subset data. This can take either 1 or 2 variables as input and if 2 variables are provided then the second variable will be treated as the minor grouping variable. In our example, if the `z` is provided as input, then within a subcollection, the mean of the feature value will be taken for all images with the same z. Then the z positions will be filtered out based on the input of `percentile` and `removeDirection` variables. Now if `z,c` are provided as input, then 'c' will be treated as the minor grouping variable which means that the mean will be taken for all images with the same z for each channel. Also, the plugin will ensures that the same values of z positions are filtered out across c. +7. `percentile` and `removeDirection` - These two variables denote the critieria with which images are filtered. For example, if percentile is `0.1` and removeDirection is set to `Below` then images with feature value below the 10th percentile will be removed. On the other hand, if removeDirection is set to above then all images with feature value greater than the 10th pecentile will be removed. This enables data subsetting from both `brightfield` and `darkfield` microscopy images. + + **Optional Arguments** + 8. `sectionVar` - This is an optional input to segregate the input image collection into sub-collections. The analysis will be done seperately for each sub-collection. 
In our example, if the user enters `r,t` as the sectionVar, then we will have 15 subcollections (5*3),1 for each combination of timepoint and replicate. If the user enters `r` as sectionVar, then we will have 5 sub collections, 1 for each replicate. If the user wants to consider the whole image collection as a single section, then no input is required. NOTE: As a post processing step, same number of images will be subsetted across different sections. -9. `padding` - This is an optional variable with default value of 0. A delay of 3 means that 3 additional planes will captured on either side of the subsetted data. This can be used as a sanity check to ensure that the subsetted data captures the images we want. For example, in our examples if the following z values were filtered out intitially - 5,6,7 ; then a delay of 3 means that the output dataset will have z positions 2,3,4,5,6,7,8,9,10 if all them exist. +9. `padding` - This is an optional variable with default value of 0. A delay of 3 means that 3 additional planes will captured on either side of the subsetted data. This can be used as a sanity check to ensure that the subsetted data captures the images we want. For example, in our examples if the following z values were filtered out intitially - 5,6,7 ; then a delay of 3 means that the output dataset will have z positions 2,3,4,5,6,7,8,9,10 if all them exist. 10. `writeOutput` - This is an optional argument with default value `True`. If it is set to true, then both the output image collection and `summary.txt` file will be created. If it is set to false, then the output directory will only consist of summary.txt. This option enables the user to tune the hyperparameters such as percentile, removeDirecton, feature without actually creating the output image collection. @@ -38,19 +39,20 @@ If WIPP is running, navigate to the plugins page and add a new plugin. 
Paste the ## Options -This plugin takes one input argument and one output argument: +This plugin takes eleven input arguments and one output argument: | Name | Description | I/O | Type | | ------------------- | ----------------------------------------------------- | ------ | ------------- | -| `--csvDir` | CSV collection containing features | Input | csvCollection | -| `--padding` | Number of images to capture outside the cutoff | Input | int | -| `--feature` | Feature to use to subset data | Input | string | +| `--inpDir` | Input image collection to be processed by this plugin | Input | collection | +| `--tabularDir` | Path to tabular data | Input | genericData | | `--filePattern` | Filename pattern used to separate data | Input | string | +| `--imageFeature` | Feature in tabular data with image filenames | Input | string | +| `--tabularFeature` | Tabular feature to filter image files | Input | string | +| `--padding` | Number of images to capture outside the cutoff | Input | integer | | `--groupVar` | variables to group by in a section | Input | string | -| `--inpDir` | Input image collection to be processed by this plugin | Input | collection | -| `--percentile` | Percentile to remove | Input | int | +| `--percentile` | Percentile to remove | Input | float | | `--removeDirection` | remove direction above or below percentile | Input | string | | `--sectionVar` | variables to divide larger sections | Input | string | | `--writeOutput` | write output image collection or not | Input | boolean | -| `--outDir` | Output collection | Output | collection | - +| `--outDir` | Output collection | Output | genericData | +| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/clustering/feature-subsetting-tool/VERSION b/clustering/feature-subsetting-tool/VERSION new file mode 100644 index 0000000..6c0f6f4 --- /dev/null +++ b/clustering/feature-subsetting-tool/VERSION @@ -0,0 +1 @@ +0.2.1-dev0 diff --git 
a/clustering/feature-subsetting-tool/build-docker.sh b/clustering/feature-subsetting-tool/build-docker.sh new file mode 100644 index 0000000..d82557e --- /dev/null +++ b/clustering/feature-subsetting-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$( x00_y01_p01_c1.ome.tif +x00_y01_p03_c2.ome.tif -----> x00_y01_p01_c2.ome.tif +x00_y01_p03_c3.ome.tif -----> x00_y01_p01_c3.ome.tif +x00_y01_p03_c4.ome.tif -----> x00_y01_p01_c4.ome.tif +x00_y01_p03_c5.ome.tif -----> x00_y01_p01_c5.ome.tif +x00_y01_p04_c1.ome.tif -----> x00_y01_p02_c1.ome.tif +x00_y01_p04_c2.ome.tif -----> x00_y01_p02_c2.ome.tif +x00_y01_p04_c3.ome.tif -----> x00_y01_p02_c3.ome.tif +x00_y01_p04_c4.ome.tif -----> x00_y01_p02_c4.ome.tif +x00_y01_p04_c5.ome.tif -----> x00_y01_p02_c5.ome.tif diff --git a/formats/tabular-to-arrow-tool/package-release.sh b/clustering/feature-subsetting-tool/package-release.sh old mode 100755 new mode 100644 similarity index 74% rename from formats/tabular-to-arrow-tool/package-release.sh rename to clustering/feature-subsetting-tool/package-release.sh index deb2942..1efde1b --- a/formats/tabular-to-arrow-tool/package-release.sh +++ b/clustering/feature-subsetting-tool/package-release.sh @@ -10,7 +10,7 @@ bump2version --config-file bumpversion.cfg --new-version ${version} --allow-dirt ./build-docker.sh # Push to dockerhub -docker push polusai/tabular-to-arrow-plugin:${version} +docker push polusai/feature-subsetting-tool:${version} -# Run unittests -python -m unittest +# Run pytests +python -m pytest -s tests diff --git a/clustering/feature-subsetting-tool/plugin.json b/clustering/feature-subsetting-tool/plugin.json new file mode 100644 index 0000000..a93c591 --- /dev/null +++ b/clustering/feature-subsetting-tool/plugin.json @@ -0,0 +1,173 @@ +{ + "name": "Feature Subsetting", + "version": "0.2.1-dev0", + "title": "Feature Subsetting", + "description": "Subset data using a given feature.", + "author": "Gauhar Bains (gauhar.bains@labshare.org) and Hamdah Shafqat 
Abbasi (hamdahshafqat.abbasi@nih.gov)", + "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/PolusAI/tabular-tools", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "", + "containerId": "polusai/feature-subsetting-plugin:0.2.1-dev0", + "baseCommand": [ + "python3", + "-m", + "polus.tabular.clustering.feature_subsetting" + ], + "inputs": { + "inpDir": { + "type": "collection", + "title": "Input image directory", + "description": "Input image directory.", + "required": "True" + }, + "tabularDir": { + "type": "genericData", + "title": "Input tabular directory", + "description": "Path to directory containing tabular data.", + "required": "True" + }, + "filePattern": { + "type": "string", + "title": "Filename pattern", + "description": "Filename pattern used to separate data.", + "required": "True" + }, + "imageFeature": { + "type": "string", + "title": "imageFeature", + "description": "Feature in tabular data containing image filenames.", + "required": "True" + }, + "tabularFeature": { + "type": "string", + "title": "tabularFeature", + "description": "Feature in tabular data to subset image data.", + "required": "True" + }, + "padding": { + "type": "integer", + "title": "padding", + "description": "Number of images to capture outside the cutoff.", + "required": "False" + }, + "groupVar": { + "type": "string", + "title": "groupVar", + "description": "variables to group by in a section.", + "required": "True" + }, + "percentile": { + "type": "float", + "title": "percentile", + "description": "Percentile to remove.", + "required": "True" + }, + "removeDirection": { + "type": "string", + "title": "removeDirection", + "description": "Remove direction above or below percentile.", + "required": "False", + "default": "Below" + }, + "sectionVar": { + "type": "string", + "title": "sectionVar", + "description": "Variables to divide larger sections.", + 
"required": "False" + }, + "writeOutput": { + "type": "boolean", + "title": "writeOutput", + "description": "Write output image collection or not.", + "required": "False" + }, + "preview": { + "type": "boolean", + "title": "Preview", + "description": "Generate an output preview.", + "required": "False" + } + }, + "outputs": { + "outDir": { + "type": "genericData", + "description": "Output collection." + } + }, + "ui": { + "inpDir": { + "type": "collection", + "title": "Input image directory", + "description": "Input image directory.", + "required": "True" + }, + "tabularDir": { + "type": "genericData", + "title": "Input tabular directory", + "description": "Path to directory containing tabular data.", + "required": "True" + }, + "filePattern": { + "type": "string", + "title": "Filename pattern", + "description": "Filename pattern used to separate data.", + "required": "True" + }, + "imageFeature": { + "type": "string", + "title": "imageFeature", + "description": "Feature in tabular data containing image filenames.", + "required": "True" + }, + "tabularFeature": { + "type": "string", + "title": "tabularFeature", + "description": "Feature in tabular data to subset image data.", + "required": "True" + }, + "padding": { + "type": "integer", + "title": "padding", + "description": "Number of images to capture outside the cutoff.", + "required": "False" + }, + "groupVar": { + "type": "string", + "title": "groupVar", + "description": "variables to group by in a section.", + "required": "True" + }, + "percentile": { + "type": "float", + "title": "percentile", + "description": "Percentile to remove.", + "required": "True" + }, + "removeDirection": { + "type": "string", + "title": "removeDirection", + "description": "Remove direction above or below percentile.", + "required": "False", + "default": "Below" + }, + "sectionVar": { + "type": "string", + "title": "sectionVar", + "description": "Variables to divide larger sections.", + "required": "False" + }, + "writeOutput": { + 
"type": "boolean", + "title": "writeOutput", + "description": "Write output image collection or not.", + "required": "False" + }, + "preview": { + "type": "boolean", + "title": "Preview", + "description": "Generate an output preview.", + "required": "False" + } + } +} \ No newline at end of file diff --git a/clustering/feature-subsetting-tool/pyproject.toml b/clustering/feature-subsetting-tool/pyproject.toml new file mode 100644 index 0000000..7a16b7c --- /dev/null +++ b/clustering/feature-subsetting-tool/pyproject.toml @@ -0,0 +1,30 @@ +[tool.poetry] +name = "polus-tabular-clustering-feature-subsetting" +version = "0.2.1-dev0" +description = "Subset data using a given feature." +authors = [ + "Gauhar Bains ", + "Hamdah Shafqat abbasi " + ] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +vaex = "^4.17.0" + + +[tool.poetry.group.dev.dependencies] +pre-commit = "^3.3.3" +bump2version = "^1.0.1" +pytest = "^7.3.2" +pytest-xdist = "^3.3.1" +pytest-sugar = "^0.9.7" +ipykernel = "^6.28.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/clustering/feature-subsetting-tool/run-docker.sh b/clustering/feature-subsetting-tool/run-docker.sh new file mode 100644 index 0000000..0810b5c --- /dev/null +++ b/clustering/feature-subsetting-tool/run-docker.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +version=$( None: + """Generate preview of the plugin outputs.""" + shutil.copy( + Path(__file__).parents[4].joinpath("example/summary.txt"), + out_dir, + ) + + +@app.command() +def main( # noqa: PLR0913 + inp_dir: Path = typer.Option( + ..., + "--inpDir", + "-i", + help="Path to the collection of input images.", + ), + tabular_dir: Path = typer.Option( + ..., + "--tabularDir", + "-t", + help="Path to the collection of tabular files containing features.", + ), + file_pattern: Optional[str] = typer.Option( + 
".*", + "--filePattern", + "-f", + help="Pattern use to parse filenames", + ), + image_feature: str = typer.Option( + None, + "--imageFeature", + "-if", + help="Image filenames feature in tabular data.", + ), + tabular_feature: str = typer.Option( + None, + "--tabularFeature", + "-tf", + help="Select tabular feature to subset data.", + ), + padding: Optional[int] = typer.Option( + 0, + "--padding", + "-p", + help="Number of images to capture outside the cutoff.", + ), + group_var: str = typer.Option( + ..., + "--groupVar", + "-g", + help="variables to group by in a section.", + ), + percentile: float = typer.Option( + None, + "--percentile", + "-pc", + help="Percentile to remove.", + ), + remove_direction: Optional[str] = typer.Option( + "Below", + "--removeDirection", + "-r", + help="Remove direction above or below percentile.", + ), + section_var: Optional[str] = typer.Option( + None, + "--sectionVar", + "-s", + help="Variables to divide larger sections.", + ), + write_output: Optional[bool] = typer.Option( + False, + "--writeOutput", + "-w", + help="Write output image collection or not.", + ), + out_dir: Path = typer.Option( + ..., + "--outDir", + "-o", + help="Output directory", + ), + preview: Optional[bool] = typer.Option( + False, + "--preview", + help="Output a JSON preview of files", + ), +) -> None: + """Subset data using a given feature.""" + logger.info(f"--inpDir = {inp_dir}") + logger.info(f"--tabularDir = {tabular_dir}") + logger.info(f"--imageFeature = {image_feature}") + logger.info(f"--tabularFeature = {tabular_feature}") + logger.info(f"--filePattern = {file_pattern}") + logger.info(f"--padding = {padding}") + logger.info(f"--groupVar = {group_var}") + logger.info(f"--percentile = {percentile}") + logger.info(f"--removeDirection = {remove_direction}") + logger.info(f"--sectionVar = {section_var}") + logger.info(f"--writeOutput = {write_output}") + logger.info(f"--outDir = {out_dir}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + 
+ assert inp_dir.exists(), f"{inp_dir} does not exist!! Please check input path again" + assert ( + out_dir.exists() + ), f"{out_dir} does not exist!! Please check output path again" + + if preview: + generate_preview(out_dir) + + else: + fs.feature_subset( + inp_dir, + tabular_dir, + out_dir, + file_pattern, + group_var, + percentile, + remove_direction, + section_var, + image_feature, + tabular_feature, + padding, + write_output, + ) + + +if __name__ == "__main__": + app() diff --git a/clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/feature_subset.py b/clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/feature_subset.py new file mode 100644 index 0000000..15e4b74 --- /dev/null +++ b/clustering/feature-subsetting-tool/src/polus/tabular/clustering/feature_subsetting/feature_subset.py @@ -0,0 +1,300 @@ +"""Feature Subsetting Tool.""" + +import logging +import os +import shutil +from pathlib import Path +from typing import Any + +import filepattern +import vaex +from tqdm import tqdm + +CHUNK_SIZE = 10000 + +logger = logging.getLogger(__name__) +logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO)) +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") + + +def filter_planes( + feature_dict: dict, + remove_direction: str, + percentile: float, +) -> set[Any]: + """Filter planes by the criteria specified by remove_direction and percentile. 
+ + Args: + feature_dict : planes and respective feature value + remove_direction: remove above or below percentile + percentile : cutoff percentile + + Returns: + set: planes that fit the criteria + """ + planes = list(feature_dict.keys()) + feat_value = [feature_dict[i] for i in planes] + thresh = min(feat_value) + percentile * (max(feat_value) - min(feat_value)) + + # filter planes + if remove_direction == "Below": + keep_planes = [z for z in planes if feature_dict[z] >= thresh] + else: + keep_planes = [z for z in planes if feature_dict[z] <= thresh] + + return set(keep_planes) + + +def make_uniform(planes_dict: dict, uniques: list[int], padding: int) -> dict: + """Ensure each section has the same number of images. + + This function makes the output collection uniform in + the sense that it preserves same number of planes across + sections. It also captures additional planes based + on the value of the padding variable + + Args: + planes_dict: planes to keep in different sections + uniques : unique values for the major grouping variable + padding : additional images to capture outside cutoff + + Returns: + dictionary: dictionary containing planes to keep + """ + # max no. 
of planes + max_len = max([len(i) for i in planes_dict.values()]) + + # max planes that can be added on each side + min_ind = min([min(planes_dict[k]) for k in planes_dict]) + max_ind = max([max(planes_dict[k]) for k in planes_dict]) + max_add_left = uniques.index(min_ind) + max_add_right = len(uniques) - (uniques.index(max_ind) + 1) + + # add planes in each section based on padding and max number of planes + for section_id, planes in planes_dict.items(): + len_to_add = max_len - len(planes) + len_add_left = min(int(len_to_add) / 2 + padding, max_add_left) + len_add_right = min(len_to_add - len_add_left + padding, max_add_right) + left_ind = int(uniques.index(min(planes)) - len_add_left) + right_ind = int(uniques.index(max(planes)) + len_add_right) + 1 + planes_dict[section_id] = uniques[left_ind:right_ind] + return planes_dict + + +def feature_subset( # noqa : C901 + inp_dir: Path, + tabular_dir: Path, + out_dir: Path, + file_pattern: str, + group_var: str, + percentile: float, + remove_direction: str, + section_var: str, + image_feature: str, + tabular_feature: str, + padding: int, + write_output: bool, +) -> None: + """Subsetting images based on feature values. + + Args: + inp_dir: Path to the collection of input images + tabular_dir : Path to the tabular data directory + out_dir : Path to output directory + file_pattern : Pattern to parse image file names + group_var : variables to group by in a section + percentile : Percentile to remove + remove_direction : Remove direction above or below percentile + section_var : Variables to divide larger sections + image_feature: Image filenames feature in tabular data + tabular_feature : Select tabular feature to subset data + padding : additional images to capture outside cutoff + write_output : Write output image collection or not. 
+ """ + tabular_dir_files = [ + f + for f in Path(tabular_dir).iterdir() + if f.is_file() + and "".join(f.suffixes) in [".csv", ".arrow", ".parquet", ".fits"] + ] + + if len(tabular_dir_files) == 0: + msg = f"No tabular files detected Please check {tabular_dir} again" + raise ValueError(msg) + + # Get the column headers + headers = [] + for in_file in tabular_dir_files: + df = vaex.open(in_file) + headers.append(list(df.columns)) + headers = list(set(headers[0]).intersection(*headers)) + logger.info("Merging the data along rows...") + + featuredf = [] + for in_file in tqdm( + tabular_dir_files, + total=len(tabular_dir_files), + desc="Vaex loading of file", + ): + if in_file.suffix == ".csv": + df = vaex.from_csv(in_file, chunk_size=100_000, convert=True) + else: + df = vaex.open(in_file) + df = df[list(headers)] + featuredf.append(df) + + feature_df = vaex.concat(featuredf) + + if feature_df.shape[0] == 0: + msg = f"tabular files are empty Please check {tabular_dir} again" + raise ValueError(msg) + + # store image name and its feature value + feature_dict = dict( + zip( + list(feature_df[image_feature].to_numpy()), + list(feature_df[tabular_feature].to_numpy()), + ), + ) + + # seperate filepattern variables into different categories + fps = filepattern.FilePattern(inp_dir, file_pattern) + if not len(fps) > 0: + msg = "No image files are detected. Please check filepattern again!" 
+ raise ValueError(msg) + + uniques = fps.get_unique_values() + var = fps.get_variables() + grouping_variables = group_var.split(",") + if len(grouping_variables) > 1: + min_grouping_var, maj_grouping_var = ( + grouping_variables[1], + grouping_variables[0], + ) + gp_by = [min_grouping_var, maj_grouping_var] + else: + gp_by = [group_var] + + if section_var is not None: + section_variables = section_var.split(",") + sub_section_variables = [ + v for v in var if v not in grouping_variables + section_variables + ] + else: + sub_section_variables = [v for v in var if v not in grouping_variables] + + logger.info("Iterating over sections...") + # single iteration of this loop gives all images in one section + + section_feat = [] + section_keep_planes = [] + keep_planes = {} + + for file in fps(group_by=gp_by): + section_feat_dict: dict[Any, Any] = {} + if section_var is not None: + section_id = tuple([file[0][i] for i in section_var.split(",")]) + else: + section_id = 1 + + # iterate over files in one section + + fm = file[1][0][0] + fname = file[1][0][1][0].name + + if min_grouping_var is None: + fm[min_grouping_var] = None + + if fm[min_grouping_var] not in section_feat_dict: + section_feat_dict[fm[min_grouping_var]] = {} + + if fm[maj_grouping_var] not in section_feat_dict[fm[min_grouping_var]]: + section_feat_dict[fm[min_grouping_var]][fm[maj_grouping_var]] = [] + + section_feat_dict[fm[min_grouping_var]][fm[maj_grouping_var]].append( + feature_dict[fname], + ) + + section_feat.append(section_feat_dict) + + sectionfeat: dict[Any, Any] = {} + for f in section_feat: + for k, v in f.items(): + if k not in sectionfeat: + sectionfeat[k] = {} + sectionfeat[k].update(v) + + # average feature value by grouping variable + + for key1 in sectionfeat: + for key2 in sectionfeat[key1]: + sectionfeat[key1][key2] = sum(sectionfeat[key1][key2]) / len( + sectionfeat[key1][key2], + ) + + # find planes to keep based on specified criteria + section_keep_planes.append( + 
filter_planes(sectionfeat[key1], remove_direction, percentile), + ) + + # keep same planes within a section, across the minor grouping variable + section_keep_planes = list(section_keep_planes[0].union(*section_keep_planes)) + section_keep_planes = [ + i + for i in range( # type: ignore + min(section_keep_planes), + max(section_keep_planes) + 1, # type: ignore + ) + if i in uniques[maj_grouping_var] + ] + keep_planes[section_id] = section_keep_planes + + # # keep same number of planes across different sections + keep_planes = make_uniform(keep_planes, list(uniques[maj_grouping_var]), padding) + + # start writing summary.txt + summary = Path.open(Path(out_dir, "summary.txt"), "w") + + summary.write("\n Files : \n \n") + # update summary.txt with section renaming info + + logger.info("renaming subsetted data") + + for file in fps(group_by=sub_section_variables + grouping_variables): + if section_var is not None: + section_id = tuple([file[0][i] for i in section_var.split(",")]) + else: + section_id = 1 + + section_keep_planes = keep_planes[section_id] + rename_map = dict(zip(keep_planes[section_id], uniques[maj_grouping_var])) + + if section_var is not None and section_var.strip(): + summary.write( + f"Section : {({k: file[0][k] for k in section_variables})} \n", + ) + logger.info( + "Renaming files from section : {} \n".format( + {k: file[0][k] for k in section_variables}, + ), + ) + fm = file[1][0][0] + fname = file[1][0][1][0].name + + if fm[maj_grouping_var] not in keep_planes[section_id]: + continue + + # old and new file name + old_file_name = fname + + file_name_dict = dict(fm.items()) + file_name_dict[maj_grouping_var] = rename_map[fm[maj_grouping_var]] + + new_file_name = fps.get_matching(**file_name_dict)[0][1][0].name + + # if write output collection + if write_output: + shutil.copy2(Path(inp_dir, old_file_name), Path(out_dir, new_file_name)) + + summary.write(f"{old_file_name} -----> {new_file_name} \n") + summary.close() diff --git 
a/clustering/feature-subsetting-tool/tests/__init__.py b/clustering/feature-subsetting-tool/tests/__init__.py new file mode 100644 index 0000000..00b38f2 --- /dev/null +++ b/clustering/feature-subsetting-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Feature Subsetting Tool.""" diff --git a/clustering/feature-subsetting-tool/tests/conftest.py b/clustering/feature-subsetting-tool/tests/conftest.py new file mode 100644 index 0000000..6aee03c --- /dev/null +++ b/clustering/feature-subsetting-tool/tests/conftest.py @@ -0,0 +1,58 @@ +"""Test fixtures. + +Set up all data used in tests. +""" +import tempfile +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest + + +@pytest.fixture( + params=[ + (500, ".csv"), + ], +) +def get_params(request: pytest.FixtureRequest) -> tuple[int, str]: + """To get the parameter of the fixture.""" + return request.param + + +@pytest.fixture() +def generate_synthetic_data( + get_params: tuple[int, str], +) -> tuple[Path, Path, Path, str]: + """Generate tabular data.""" + nrows, file_extension = get_params + input_directory = Path(tempfile.mkdtemp(prefix="inpDir_", dir=Path.cwd())) + tabular_directory = Path(tempfile.mkdtemp(prefix="tabularDir_", dir=Path.cwd())) + output_directory = Path(tempfile.mkdtemp(prefix="out_", dir=Path.cwd())) + rng = np.random.default_rng() + channels = 5 + zpos = 4 + nrows = 3 + for c in range(channels): + for z in range(zpos): + file_name = Path(input_directory, f"x00_y01_p0{z}_c{c}.ome.tif") + Path.open(Path(file_name), "a").close() + + tabular_data = { + "intensity_image": [file_name.name] * nrows, + "MEAN": rng.random(nrows).tolist(), + "MEAN_ABSOLUTE_DEVIATION": rng.random(nrows).tolist(), + "MEDIAN": rng.random(nrows).tolist(), + "MODE": rng.random(nrows).tolist(), + } + outname = file_name.stem.split(".")[0] + + df = pd.DataFrame(tabular_data) + if file_extension == ".csv": + outpath = Path(tabular_directory, f"{outname}.csv") + df.to_csv(outpath, index=False) + if file_extension == 
".arrow": + outpath = Path(tabular_directory, f"{outname}.arrow") + df.to_feather(outpath) + + return input_directory, tabular_directory, output_directory, file_extension diff --git a/clustering/feature-subsetting-tool/tests/test_cli.py b/clustering/feature-subsetting-tool/tests/test_cli.py new file mode 100644 index 0000000..aece6a2 --- /dev/null +++ b/clustering/feature-subsetting-tool/tests/test_cli.py @@ -0,0 +1,92 @@ +"""Test Command line Tool.""" + +from typer.testing import CliRunner +from polus.tabular.clustering.feature_subsetting.__main__ import app +import shutil +from pathlib import Path + + +def test_cli(generate_synthetic_data: tuple[Path, Path, Path, str]) -> None: + """Test the command line.""" + inp_dir, tabular_dir, out_dir, _ = generate_synthetic_data + file_pattern = "x{x+}_y{y+}_p{p+}_c{c+}.ome.tif" + image_feature = "intensity_image" + tabular_feature = "MEAN" + padding = 0 + group_var = "p,c" + + runner = CliRunner() + result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--tabularDir", + tabular_dir, + "--filePattern", + file_pattern, + "--imageFeature", + image_feature, + "--tabularFeature", + tabular_feature, + "--padding", + padding, + "--groupVar", + group_var, + "--percentile", + 0.8, + "--removeDirection", + "Below", + "--writeOutput", + "--outDir", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + shutil.rmtree(tabular_dir) + + +def test_short_cli(generate_synthetic_data: tuple[Path, Path, Path, str]) -> None: + """Test short cli command line.""" + inp_dir, tabular_dir, out_dir, _ = generate_synthetic_data + file_pattern = "x{x+}_y{y+}_p{p+}_c{c+}.ome.tif" + image_feature = "intensity_image" + tabular_feature = "MEAN" + padding = 0 + group_var = "p,c" + + runner = CliRunner() + result = runner.invoke( + app, + [ + "-i", + inp_dir, + "-t", + tabular_dir, + "-f", + file_pattern, + "-if", + image_feature, + "-tf", + tabular_feature, + "-p", + padding, + "-g", + group_var, + 
"-pc", + 0.8, + "-r", + "Below", + "-w", + "-o", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + shutil.rmtree(tabular_dir) diff --git a/clustering/feature-subsetting-tool/tests/test_feature_subsetting.py b/clustering/feature-subsetting-tool/tests/test_feature_subsetting.py new file mode 100644 index 0000000..1675392 --- /dev/null +++ b/clustering/feature-subsetting-tool/tests/test_feature_subsetting.py @@ -0,0 +1,72 @@ +"""Test Feature Subsetting Plugin.""" + +import shutil +from pathlib import Path + +import polus.tabular.clustering.feature_subsetting.feature_subset as fs + + +def test_feature_subset( + generate_synthetic_data: tuple[Path, Path, Path, str], +) -> None: + """Test images subsetting based on feature values.""" + inp_dir, tabular_dir, out_dir, _ = generate_synthetic_data + file_pattern = "x{x+}_y{y+}_p{p+}_c{c+}.ome.tif" + image_feature = "intensity_image" + tabular_feature = "MEAN" + padding = 0 + percentile = 0.8 + remove_direction = "Below" + group_var = "p,c" + write_output = True + + fs.feature_subset( + inp_dir=inp_dir, + tabular_dir=tabular_dir, + out_dir=out_dir, + file_pattern=file_pattern, + group_var=group_var, + percentile=percentile, + remove_direction=remove_direction, + section_var=None, + image_feature=image_feature, + tabular_feature=tabular_feature, + padding=padding, + write_output=write_output, + ) + + out_ext = [Path(f.name).suffix for f in out_dir.iterdir()] + assert len(out_ext) != 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + shutil.rmtree(tabular_dir) + + +def test_filter_planes() -> None: + """Test filter planes.""" + feature_dict = { + 1: 1236.597914951989, + 2: 1153.754875685871, + 3: 1537.3429175240055, + 4: 1626.0415809327849, + } + + percentile = 0.1 + remove_direction = "Below" + fn = fs.filter_planes( + feature_dict=feature_dict, + remove_direction=remove_direction, + percentile=percentile, + ) + + assert type(fn) == set + + +def test_make_uniform() -> 
None: + """Test each section contain same number of images.""" + planes_dict = {1: [3, 4]} + uniques = [1, 2, 3, 4] + padding = 0 + fn = fs.make_uniform(planes_dict=planes_dict, uniques=uniques, padding=padding) + + assert len(fn) != 0 diff --git a/formats/tabular-to-arrow-tool/.bumpversion.cfg b/clustering/hdbscan-clustering-tool/.bumpversion.cfg similarity index 78% rename from formats/tabular-to-arrow-tool/.bumpversion.cfg rename to clustering/hdbscan-clustering-tool/.bumpversion.cfg index 9434540..732b62d 100644 --- a/formats/tabular-to-arrow-tool/.bumpversion.cfg +++ b/clustering/hdbscan-clustering-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.2.3-dev0 +current_version = 0.4.8-dev1 commit = True tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? @@ -23,5 +23,6 @@ replace = version = "{new_version}" [bumpversion:file:plugin.json] [bumpversion:file:VERSION] +[bumpversion:file:README.md] -[bumpversion:file:src/polus/images/formats/tabular_to_arrow/__init__.py] +[bumpversion:file:src/polus/tabular/clustering/hdbscan_clustering/__init__.py] diff --git a/clustering/hdbscan-clustering-tool/.gitignore b/clustering/hdbscan-clustering-tool/.gitignore new file mode 100644 index 0000000..9ed1c37 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/.gitignore @@ -0,0 +1,23 @@ +# Jupyter Notebook +.ipynb_checkpoints +poetry.lock +../../poetry.lock +# Environments +.env +.myenv +.venv +env/ +venv/ +# test data directory +data +# yaml file +.pre-commit-config.yaml +# hidden files +.DS_Store +.ds_store +# flake8 +.flake8 +../../.flake8 +__pycache__ +.mypy_cache +requirements.txt diff --git a/formats/arrow-to-tabular-tool/Dockerfile b/clustering/hdbscan-clustering-tool/Dockerfile similarity index 68% rename from formats/arrow-to-tabular-tool/Dockerfile rename to clustering/hdbscan-clustering-tool/Dockerfile index 210b38a..69e7d18 100644 --- a/formats/arrow-to-tabular-tool/Dockerfile +++ 
b/clustering/hdbscan-clustering-tool/Dockerfile @@ -1,7 +1,8 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" +ENV POLUS_LOG="INFO" ENV POLUS_IMG_EXT=".ome.tif" ENV POLUS_TAB_EXT=".csv" @@ -11,10 +12,10 @@ WORKDIR ${EXEC_DIR} COPY pyproject.toml ${EXEC_DIR} COPY VERSION ${EXEC_DIR} COPY README.md ${EXEC_DIR} -RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.formats.arrow_to_tabular"] + +ENTRYPOINT ["python3", "-m", "polus.tabular.clustering.hdbscan_clustering"] CMD ["--help"] diff --git a/clustering/polus-hdbscan-clustering-plugin/README.md b/clustering/hdbscan-clustering-tool/README.md similarity index 67% rename from clustering/polus-hdbscan-clustering-plugin/README.md rename to clustering/hdbscan-clustering-tool/README.md index 2169be0..37f1589 100644 --- a/clustering/polus-hdbscan-clustering-plugin/README.md +++ b/clustering/hdbscan-clustering-tool/README.md @@ -1,17 +1,20 @@ -# Hierarchical Density-Based Spatial Clustering of Applications with Noise(HDBSCAN) Clustering +# Hierarchical Density-Based Spatial Clustering of Applications with Noise(HDBSCAN) Clustering (v0.4.8-dev1) The HDBSCAN Clustering plugin clusters the data using [HDBSCAN clustering](https://pypi.org/project/hdbscan/) library. The input and output for this plugin is a CSV file. Each observation (row) in the input CSV file is assigned to one of the clusters. The output CSV file contains the column `cluster` that identifies the cluster to which each observation belongs. A user can supply a regular expression with capture groups if they wish to cluster each group independently, or if they wish to average the numerical features across each group and treat them as a single observation. ## Inputs: -### Input CSV collection: -The input file(s) that need to be clustered. 
The file should be in CSV format. This is a required parameter for the plugin. +### Input directory: +This plugin supports all [vaex](https://vaex.readthedocs.io/en/latest/guides/io.html) supported file formats. + +### Filename pattern: +This plugin uses [filepattern](https://filepattern2.readthedocs.io/en/latest/Home.html) python library to parse file names of tabular files to be processed by this plugin. ### Grouping pattern: -The input for this parameter is a regular expression with capture group. This input splits the data into groups based on the matched pattern. A new column `group` is created in the output CSV file that has the group based on the given pattern. Unless `averageGroups` is set to `true`, providing a grouping pattern will cluster each group independently. +The input for this parameter is a regular expression with capture group. This input splits the data into groups based on the matched pattern. A new column `group` is created in the output file that has the group based on the given pattern. Unless `averageGroups` is set to `true`, providing a grouping pattern will cluster each group independently. ### Average groups: -Setting this equal to `true` will use the supplied `groupingPattern` to average the numerical features and produce a single row per group which is then clustered. The resulting cluster is assigned to all observations belonging in that group. +When set to `true`, the supplied `groupingPattern` is used to average the numerical features and produce a single row per group which is then clustered. The resulting cluster is assigned to all observations belonging in that group. ### Label column: This is the name of the column containing the labels to be used with `groupingPattern`. @@ -20,10 +23,10 @@ This is the name of the column containing the labels to be used with `groupingPa This parameter defines the smallest number of points that should be considered as cluster. This is a required parameter. The input should be an integer and the value should be greater than 1. 
### Increment outlier ID: -This parameter sets the ID of the outlier cluster to `1`, otherwise it will be 0. This is useful for visualization purposes if the resulting cluster IDs are turned into image annotations. +This parameter sets the ID of the outlier cluster to `1`, otherwise it will be 0. This is useful for visualization purposes if the resulting cluster IDs are turned into image annotations. ## Output: -The output is a CSV file containing the clustered data. +The output is a tabular file containing the clustered data. ## Building To build the Docker image for the conversion plugin, run @@ -39,10 +42,11 @@ This plugin takes four input arguments and one output argument: | Name | Description | I/O | Type | | ---------------------- | ---------------------------------------------------------------------------------------------- | ------ | ------------- | -| `--inpDir` | Input csv collection. | Input | csvCollection | +| `--inpDir` | Input tabular data files. | Input | genericData | | `--groupingPattern` | Regular expression to group rows. Clustering will be applied across capture groups by default. | Input | string | -| `--averageGroups` | If set to `true`, will average data across groups. Requires capture groups | Input | string | +| `--averageGroups` | Average data across groups. Requires capture groups | Input | boolean | | `--labelCol` | Name of the column containing labels for grouping pattern. | Input | string | -| `--minClusterSize` | Minimum cluster size. | Input | integer | -| `--incrementOutlierId` | Increments outlier ID to 1. | Input | string | -| `--outDir` | Output collection | Output | csvCollection | +| `--minClusterSize` | Minimum cluster size. | Input | number | +| `--incrementOutlierId` | Increments outlier ID to 1. 
| Input | boolean | +| `--outDir` | Output collection | Output | genericData | +| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/clustering/hdbscan-clustering-tool/VERSION b/clustering/hdbscan-clustering-tool/VERSION new file mode 100644 index 0000000..5915443 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/VERSION @@ -0,0 +1 @@ +0.4.8-dev1 diff --git a/clustering/hdbscan-clustering-tool/build-docker.sh b/clustering/hdbscan-clustering-tool/build-docker.sh new file mode 100755 index 0000000..2e7dd18 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/build-docker.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +version=$(", + "Hythem Sidky ", + "Hamdah Shafqat abbasi " + ] +readme = "README.md" +packages = [{include = "polus", from = "src"}] + +[tool.poetry.dependencies] +python = ">=3.9,<3.12" +filepattern = "^2.0.4" +typer = "^0.7.0" +tqdm = "^4.64.1" +preadator="0.4.0.dev2" +vaex = "^4.17.0" +hdbscan = "^0.8.34rc1" + + +[tool.poetry.group.dev.dependencies] +pre-commit = "^3.3.3" +bump2version = "^1.0.1" +pytest = "^7.3.2" +pytest-xdist = "^3.3.1" +pytest-sugar = "^0.9.7" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/clustering/hdbscan-clustering-tool/run-docker.sh b/clustering/hdbscan-clustering-tool/run-docker.sh new file mode 100755 index 0000000..9311151 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/run-docker.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +version=$( None: + """Cluster data using HDBSCAN.""" + logger.info(f"--inpDir = {inp_dir}") + logger.info(f"--filePattern = {file_pattern}") + # Regular expression for grouping. + logger.info(f"--groupingPattern = {grouping_pattern}") + # Whether to average data for each group. + logger.info(f"--averageGroups = {average_groups}") + # Name of column to use for grouping. + logger.info(f"--labelCol = {label_col}") + # Minimum cluster size for clustering using HDBSCAN. 
+ logger.info(f"--minClusterSize = {min_cluster_size}") + # Set outlier cluster id as 1. + logger.info(f"--incrementOutlierId = {increment_outlier_id}") + logger.info(f"--outDir = {out_dir}") + + inp_dir = inp_dir.resolve() + out_dir = out_dir.resolve() + + assert inp_dir.exists(), f"{inp_dir} does not exist!! Please check input path again" + assert ( + out_dir.exists() + ), f"{out_dir} does not exist!! Please check output path again" + + num_workers = max([cpu_count(), 2]) + + files = fp.FilePattern(inp_dir, file_pattern) + + if files is None: + msg = f"No tabular files found. Please check {file_pattern} again" + raise ValueError(msg) + + if preview: + with Path.open(Path(out_dir, "preview.json"), "w") as jfile: + out_json: dict[str, Any] = { + "filepattern": file_pattern, + "outDir": [], + } + for file in files(): + out_name = file[1][0].name.replace( + "".join(file[1][0].suffixes), + f"_hdbscan{hd.POLUS_TAB_EXT}", + ) + out_json["outDir"].append(out_name) + json.dump(out_json, jfile, indent=2) + else: + with preadator.ProcessManager( + name="Cluster data using HDBSCAN", + num_processes=num_workers, + threads_per_process=2, + ) as pm: + for file in tqdm( + files(), + total=len(files()), + desc="Clustering data", + mininterval=5, + initial=0, + unit_scale=True, + colour="cyan", + ): + pm.submit_process( + hd.hdbscan_clustering, + file[1][0], + min_cluster_size, + out_dir, + grouping_pattern, + label_col, + average_groups, + increment_outlier_id, + ) + pm.join_processes() + + +if __name__ == "__main__": + app() diff --git a/clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/hdbscan_clustering.py b/clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/hdbscan_clustering.py new file mode 100644 index 0000000..3940c28 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/src/polus/tabular/clustering/hdbscan_clustering/hdbscan_clustering.py @@ -0,0 +1,150 @@ +"""Hdbscan Clustering Plugin.""" +import logging 
+import os +import re +from itertools import chain +from pathlib import Path + +import hdbscan +import numpy as np +import vaex + +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) + +POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") +CHUNK_SIZE = 10000 + + +def hdbscan_model( + data: np.ndarray, + min_cluster_size: int, + increment_outlier_id: bool, +) -> np.ndarray: + """Cluster data using HDBSCAN. + + Args: + data: Data that need to be clustered. + min_cluster_size: Minimum cluster size. + increment_outlier_id: Increment outlier ID to unity. + + Returns: + Cluster labels for each row of data. + """ + clusters = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size).fit(data) + labels = clusters.labels_.flatten().astype(np.uint16) + 1 + return labels + 1 if increment_outlier_id else labels + + +def hdbscan_clustering( # noqa: PLR0913 + file: Path, + min_cluster_size: int, + out_dir: Path, + grouping_pattern: str, + label_col: str, + average_groups: bool, + increment_outlier_id: bool, +) -> None: + """Cluster data using HDBSCAN. + + Args: + file: Path of a tabular file. + min_cluster_size: Smallest size grouping that should be considered as a cluster. + out_dir: Path to output directory. + grouping_pattern: Regular expression to capture groups in a label_col. + label_col: Name of column containing labels. + average_groups: To average data across groups. + increment_outlier_id: Increment outlier ID to unity. + """ + if Path(file.name).suffix == ".csv": + df = vaex.from_csv(file, convert=True, chunk_size=CHUNK_SIZE) + else: + df = vaex.open(file) + # If user provided a regular expression. 
+ if grouping_pattern: + if label_col == "None": + msg = f"Please define label column to capture groups {label_col}" + raise ValueError(msg) + + # Create a column group with matching string + group = np.array( + [ + re.search(grouping_pattern, x).group(0) # type: ignore + for x in df[label_col].tolist() + if len(re.search(grouping_pattern, x).group(0)) != 0 # type: ignore + ], + ) + if len(group) == 0: + msg = f"Could not find group with pattern {grouping_pattern}" + raise ValueError(msg) + + # Create a column group with matching string + df["group"] = group + int_columns = [ + feature + for feature in df.get_column_names() + if df.data_type(feature) == int or df.data_type(feature) == float + ] + + # If we want to average features for each group. + if average_groups: + df_grouped = df.groupby( + "group", + agg=[vaex.agg.mean(x) for x in int_columns], + ) + # Cluster data using HDBSCAN clustering. + logger.info("Clustering the data") + cluster_ids = hdbscan_model( + df_grouped.values, + min_cluster_size, + increment_outlier_id, + ) + df_grouped["cluster"] = cluster_ids + df = df.join( + df_grouped["group", "cluster"], + left_on="group", + right_on="group", + ) + + else: + dfs = [] + for group, df_ss in df.groupby("group"): + # Cluster data using HDBSCAN clustering. + logger.info(f"Clustering data in group {group}") + + cluster_ids = hdbscan_model( + df_ss.values, + min_cluster_size, + increment_outlier_id, + ) + + dfs.append(cluster_ids) + cluster_ids = np.array(list(chain.from_iterable(dfs))) + df["cluster"] = cluster_ids + + # No grouping. Vanilla clustering. 
+ else: + int_columns = [ + feature + for feature in df.get_column_names() + if df.data_type(feature) == int or df.data_type(feature) == float + ] + + # Cluster data using HDBSCAN clustering + logger.info("Clustering the data") + cluster_ids = hdbscan_model( + df[int_columns].values, + min_cluster_size, + increment_outlier_id, + ) + df["cluster"] = cluster_ids + + outname = Path(out_dir, f"{Path(file.name).stem}_hdbscan{POLUS_TAB_EXT}") + + if POLUS_TAB_EXT == ".arrow": + df.export_feather(outname) + logger.info(f"Saving outputs: {outname}") + else: + df.export_csv(path=outname, chunk_size=CHUNK_SIZE) + + logger.info("Finished all processes!") diff --git a/clustering/hdbscan-clustering-tool/tests/__init__.py b/clustering/hdbscan-clustering-tool/tests/__init__.py new file mode 100644 index 0000000..2f89ec8 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/tests/__init__.py @@ -0,0 +1 @@ +"""Hdbscan Clustering Plugin.""" diff --git a/clustering/hdbscan-clustering-tool/tests/conftest.py b/clustering/hdbscan-clustering-tool/tests/conftest.py new file mode 100644 index 0000000..a609d5b --- /dev/null +++ b/clustering/hdbscan-clustering-tool/tests/conftest.py @@ -0,0 +1,48 @@ +"""Test fixtures. + +Set up all data used in tests. 
+""" +import tempfile +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest + + +@pytest.fixture( + params=[(50000, ".csv"), (100000, ".arrow")], +) +def get_params(request: pytest.FixtureRequest) -> tuple[int, str]: + """To get the parameter of the fixture.""" + return request.param + + +@pytest.fixture() +def generate_synthetic_data(get_params: tuple[int, str]) -> tuple[Path, Path, str]: + """Generate tabular data.""" + nrows, file_extension = get_params + + input_directory = Path(tempfile.mkdtemp(prefix="inputs_")) + output_directory = Path(tempfile.mkdtemp(prefix="out_")) + rng = np.random.default_rng() + tabular_data = { + "sepal_length": rng.random(nrows).tolist(), + "sepal_width": rng.random(nrows).tolist(), + "petal_length": rng.random(nrows).tolist(), + "petal_width": rng.random(nrows).tolist(), + "species": rng.choice( + ["Iris-setosa", "Iris-versicolor", "Iris-virginica"], + nrows, + ).tolist(), + } + + df = pd.DataFrame(tabular_data) + if file_extension == ".csv": + outpath = Path(input_directory, "data.csv") + df.to_csv(outpath, index=False) + if file_extension == ".arrow": + outpath = Path(input_directory, "data.arrow") + df.to_feather(outpath) + + return input_directory, output_directory, file_extension diff --git a/clustering/hdbscan-clustering-tool/tests/test_cli.py b/clustering/hdbscan-clustering-tool/tests/test_cli.py new file mode 100644 index 0000000..11f46c0 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/tests/test_cli.py @@ -0,0 +1,74 @@ +"""Test Command line Tool.""" + +from typer.testing import CliRunner +from polus.tabular.clustering.hdbscan_clustering.__main__ import app +import shutil +from pathlib import Path + + +def test_cli(generate_synthetic_data: tuple[Path, Path, str]) -> None: + """Test the command line.""" + inp_dir, out_dir, file_extension = generate_synthetic_data + pattern = r"\w+$" + file_pattern = f".*{file_extension}" + label = "species" + clustersize = 3 + + runner = CliRunner() + 
result = runner.invoke( + app, + [ + "--inpDir", + inp_dir, + "--filePattern", + file_pattern, + "--groupingPattern", + pattern, + "--averageGroups", + "--labelCol", + label, + "--minClusterSize", + clustersize, + "--incrementOutlierId", + "--outDir", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + + +def test_short_cli(generate_synthetic_data: tuple[Path, Path, str]) -> None: + """Test short command line.""" + inp_dir, out_dir, file_extension = generate_synthetic_data + pattern = r"\w+$" + file_pattern = f".*{file_extension}" + label = "species" + clustersize = 3 + + runner = CliRunner() + result = runner.invoke( + app, + [ + "-i", + inp_dir, + "-f", + file_pattern, + "-g", + pattern, + "-a", + "-l", + label, + "-m", + clustersize, + "-io", + "-o", + out_dir, + ], + ) + + assert result.exit_code == 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) diff --git a/clustering/hdbscan-clustering-tool/tests/test_hdbscan_clustering.py b/clustering/hdbscan-clustering-tool/tests/test_hdbscan_clustering.py new file mode 100644 index 0000000..eb34f80 --- /dev/null +++ b/clustering/hdbscan-clustering-tool/tests/test_hdbscan_clustering.py @@ -0,0 +1,49 @@ +"""Test Hdbscan Clustering Plugin.""" + +import shutil +from pathlib import Path + +import filepattern as fp +import polus.tabular.clustering.hdbscan_clustering.hdbscan_clustering as hd +import vaex + + +def test_hdbscan_clustering(generate_synthetic_data: tuple[Path, Path, str]) -> None: + """Test hdbscan clustering of tabular data.""" + inp_dir, out_dir, file_extension = generate_synthetic_data + pattern = r"\w+$" + file_pattern = f".*{file_extension}" + files = fp.FilePattern(inp_dir, file_pattern) + for file in files(): + hd.hdbscan_clustering( + file=file[1][0], + min_cluster_size=3, + grouping_pattern=pattern, + label_col="species", + average_groups=True, + increment_outlier_id=True, + out_dir=out_dir, + ) + + out_ext = [Path(f.name).suffix for f in 
out_dir.iterdir()] + assert all(out_ext) is True + for f in out_dir.iterdir(): + df = vaex.open(f) + assert "cluster" in df.column_names + assert df["cluster"].values != 0 + shutil.rmtree(inp_dir) + shutil.rmtree(out_dir) + + +def test_hdbscan_model(generate_synthetic_data: tuple[Path, Path, str]) -> None: + """Test hdbscan model.""" + inp_dir, _, file_extension = generate_synthetic_data + file_pattern = f".*{file_extension}" + files = fp.FilePattern(inp_dir, file_pattern) + for file in files(): + df = vaex.open(file[1][0]) + data = df[df.column_names[:-1]].values + min_cluster_size = 3 + label = hd.hdbscan_model(data, min_cluster_size, True) + assert len(label) != 0 + shutil.rmtree(inp_dir) diff --git a/clustering/polus-feature-subsetting-plugin/Dockerfile b/clustering/polus-feature-subsetting-plugin/Dockerfile deleted file mode 100644 index babcd23..0000000 --- a/clustering/polus-feature-subsetting-plugin/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ - -FROM polusai/bfio:2.1.9 - -# from bfio container -# ENV POLUS_EXT=".ome.tif" -# ENV POLUS_LOG="INFO" -# ENV EXEC_DIR="/opt/executables" -# ENV DATA_DIR="/data" - -COPY VERSION / - -ARG EXEC_DIR="/opt/executables" -ARG DATA_DIR="/data" - -RUN mkdir -p ${EXEC_DIR} \ - && mkdir -p ${DATA_DIR}/inputs \ - && mkdir ${DATA_DIR}/outputs - -COPY src ${EXEC_DIR}/ -WORKDIR ${EXEC_DIR} - -RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir - -ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/clustering/polus-feature-subsetting-plugin/VERSION b/clustering/polus-feature-subsetting-plugin/VERSION deleted file mode 100644 index a34eaa5..0000000 --- a/clustering/polus-feature-subsetting-plugin/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.11 \ No newline at end of file diff --git a/clustering/polus-feature-subsetting-plugin/build-docker.sh b/clustering/polus-feature-subsetting-plugin/build-docker.sh deleted file mode 100644 index d9ad137..0000000 --- 
a/clustering/polus-feature-subsetting-plugin/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$(= thresh] - else: - keep_planes = [z for z in planes if feature_dict[z] <= thresh] - - return set(keep_planes) - -def make_uniform(planes_dict, uniques, padding): - """ Ensure each section has the same number of images - - This function makes the output collection uniform in - the sense that it preserves same number of planes across - sections. It also captures additional planes based - on the value of the padding variable - - Args: - planes_dict (dict): planes to keep in different sections - uniques (list): unique values for the major grouping variable - padding (int): additional images to capture outside cutoff - - Returns: - dictionary: dictionary containing planes to keep - """ - - # max no. of planes - max_len = max([len(i) for i in planes_dict.values()]) - - # max planes that can be added on each side - min_ind = min([min(planes_dict[k]) for k in planes_dict]) - max_ind = max([max(planes_dict[k]) for k in planes_dict]) - max_add_left = uniques.index(min_ind) - max_add_right = len(uniques) - (uniques.index(max_ind)+1) - - # add planes in each section based on padding and max number of planes - for section_id, planes in planes_dict.items(): - len_to_add = max_len - len(planes) - len_add_left = min(int(len_to_add)/2+padding, max_add_left) - len_add_right = min(len_to_add - len_add_left+padding, max_add_right) - left_ind = int(uniques.index(min(planes)) - len_add_left) - right_ind = int(uniques.index(max(planes)) + len_add_right)+1 - planes_dict[section_id] = uniques[left_ind:right_ind] - return planes_dict - -def main(inpDir,csvDir,outDir,filePattern,groupVar,percentile, - removeDirection,sectionVar,feature,padding,writeOutput): - """Function containing the main login to subset data - - Args: - inpDir (string): path to input image collection - csvDir (string): path to csv file containing features - outDir (string): path to output collection - 
filePattern (string): input image filepattern - groupVar (string): grouping variables - percentile (float): cutoff feature percentile - removeDirection (string): subset above or below percentile - sectionVar (string): sectioning variable - feature (string): feature to subset using - padding (int): capture additional images outside of cutoff - writeOutput (boolean): write output image collection or not - """ - - # Get all file names in csvDir image collection - csvDir_files = [f.name for f in Path(csvDir).iterdir() if f.is_file() and "".join(f.suffixes)=='.csv'] - - # Get all file names in inpDir image collection - inpDir_files = [f.name for f in Path(inpDir).iterdir() if f.is_file() and "".join(f.suffixes)=='.ome.tif'] - - # read and concat all csv files - for ind, file in enumerate(csvDir_files): - if ind == 0: - feature_df = pd.read_csv(os.path.join(csvDir, file), header=0) - else: - feature_df = pd.concat([feature_df, pd.read_csv(os.path.join(csvDir, file), header=0)]) - - # store image name and its feature value - feature_dict = {k:v for k,v in zip(feature_df['Image'], feature_df[feature])} - - # seperate filepattern variables into different categories - _,var = filepattern.get_regex(filePattern) - grouping_variables = groupVar.split(',') - section_variables = sectionVar.split(',') - sub_section_variables = [v for v in var if v not in grouping_variables+section_variables] - - # initialize filepattern object - fp = filepattern.FilePattern(inpDir, pattern=filePattern) - uniques = fp.uniques - - [maj_grouping_var, min_grouping_var] = grouping_variables if len(grouping_variables)>1 else grouping_variables+[None] - keep_planes = {} - - logger.info('Iterating over sections...') - # single iteration of this loop gives all images in one section - for file in fp(group_by=sub_section_variables+grouping_variables): - - section_feat_dict = {} - section_keep_planes = [] - section_id = tuple([file[0][i] for i in section_variables]) if section_variables[0] else 1 - - # 
iterate over files in one section - for f in file: - if min_grouping_var == None: - f[min_grouping_var] = None - - # stote feature values for images - if f[min_grouping_var] not in section_feat_dict: - section_feat_dict[f[min_grouping_var]] = {} - - if f[maj_grouping_var] not in section_feat_dict[f[min_grouping_var]]: - section_feat_dict[f[min_grouping_var]][f[maj_grouping_var]] = [] - - section_feat_dict[f[min_grouping_var]][f[maj_grouping_var]].append(feature_dict[f['file'].name]) - - # average feature value by grouping variable - for key1 in section_feat_dict: - for key2 in section_feat_dict[key1]: - section_feat_dict[key1][key2] = sum(section_feat_dict[key1][key2])/len(section_feat_dict[key1][key2]) - - # find planes to keep based on specified criteria - section_keep_planes.append(filter_planes(section_feat_dict[key1],removeDirection, percentile)) - - # keep same planes within a section, across the minor grouping variable - section_keep_planes = list(section_keep_planes[0].union(*section_keep_planes)) - section_keep_planes = [i for i in range(min(section_keep_planes), max(section_keep_planes)+1) if i in uniques[maj_grouping_var]] - keep_planes[section_id] = section_keep_planes - - # keep same number of planes across different sections - keep_planes = make_uniform(keep_planes, uniques[maj_grouping_var], padding) - - # start writing summary.txt - summary = open(os.path.join(outDir, 'metadata_files', 'summary.txt'), 'w') - - logger.info('renaming subsetted data') - - # reinitialize filepattern object - fp = filepattern.FilePattern(inpDir, pattern=filePattern) - - # rename subsetted data - for file in fp(group_by=sub_section_variables+grouping_variables): - section_id = tuple([file[0][i] for i in section_variables]) if section_variables[0] else 1 - section_keep_planes = keep_planes[section_id] - rename_map = {k:v for k,v in zip(keep_planes[section_id], uniques[maj_grouping_var])} - - # update summary.txt with section renaming info - 
summary.write('------------------------------------------------ \n') - if sectionVar.strip(): - summary.write('Section : {} \n'.format({k:file[0][k] for k in section_variables})) - logger.info('Renaming files from section : {} \n'.format({k:file[0][k] for k in section_variables})) - summary.write('\nThe following values of "{}" variable have been renamed: \n'.format(maj_grouping_var)) - for k,v in rename_map.items(): - summary.write('{} ---> {} \n'.format(k,v)) - summary.write('\n Files : \n \n') - - # rename and write output - for f in file: - if f[maj_grouping_var] not in keep_planes[section_id]: - continue - - # old and new file name - old_file_name = f['file'].name - file_name_dict = {k.upper():v for k,v in f.items() if k!='file'} - file_name_dict[maj_grouping_var.upper()] = rename_map[f[maj_grouping_var]] - new_file_name = fp.get_matching(**file_name_dict)[0]['file'].name - - # if write output collection - if writeOutput: - shutil.copy2(os.path.join(inpDir, old_file_name),os.path.join(outDir, 'images', new_file_name)) - - summary.write('{} -----> {} \n'.format(old_file_name, new_file_name)) - summary.close() - -if __name__=="__main__": - # Initialize the logger - logging.basicConfig(format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s', - datefmt='%d-%b-%y %H:%M:%S') - logger = logging.getLogger("main") - logger.setLevel(logging.INFO) - - ''' Argument parsing ''' - logger.info("Parsing arguments...") - parser = argparse.ArgumentParser(prog='main', description='Subset data using a given feature') - - # Input arguments - parser.add_argument('--csvDir', dest='csvDir', type=str, - help='CSV collection containing features', required=True) - parser.add_argument('--padding', dest='padding', type=str, - help='Number of images to capture outside the cutoff', required=False) - parser.add_argument('--feature', dest='feature', type=str, - help='Feature to use to subset data', required=True) - parser.add_argument('--filePattern', dest='filePattern', type=str, - 
help='Filename pattern used to separate data', required=True) - parser.add_argument('--groupVar', dest='groupVar', type=str, - help='variables to group by in a section', required=True) - parser.add_argument('--inpDir', dest='inpDir', type=str, - help='Input image collection to be processed by this plugin', required=True) - parser.add_argument('--percentile', dest='percentile', type=str, - help='Percentile to remove', required=True) - parser.add_argument('--removeDirection', dest='removeDirection', type=str, - help='remove direction above or below percentile', required=True) - parser.add_argument('--sectionVar', dest='sectionVar', type=str, - help='variables to divide larger sections', required=False) - parser.add_argument('--writeOutput', dest='writeOutput', type=str, - help='write output image collection or not', required=False) - # Output arguments - parser.add_argument('--outDir', dest='outDir', type=str, - help='Output collection', required=True) - - # Parse the arguments - args = parser.parse_args() - csvDir = args.csvDir - logger.info('csvDir = {}'.format(csvDir)) - padding = args.padding - padding = 0 if padding==None else int(padding) - logger.info('padding = {}'.format(padding)) - feature = args.feature - logger.info('feature = {}'.format(feature)) - filePattern = args.filePattern - logger.info('filePattern = {}'.format(filePattern)) - groupVar = args.groupVar - logger.info('groupVar = {}'.format(groupVar)) - inpDir = args.inpDir - if (Path.is_dir(Path(args.inpDir).joinpath('images'))): - # switch to images folder if present - fpath = str(Path(args.inpDir).joinpath('images').absolute()) - logger.info('inpDir = {}'.format(inpDir)) - percentile = float(args.percentile) - logger.info('percentile = {}'.format(percentile)) - removeDirection = args.removeDirection - logger.info('removeDirection = {}'.format(removeDirection)) - sectionVar = args.sectionVar - sectionVar = '' if sectionVar is None else sectionVar - logger.info('sectionVar = {}'.format(sectionVar)) 
- writeOutput = True if args.writeOutput==None else args.writeOutput == 'true' - logger.info('writeOutput = {}'.format(writeOutput)) - outDir = args.outDir - logger.info('outDir = {}'.format(outDir)) - - # create metadata and images folder in outDir - if not os.path.isdir(os.path.join(outDir, 'images')): - os.mkdir(os.path.join(outDir, 'images')) - if not os.path.isdir(os.path.join(outDir, 'metadata_files')): - os.mkdir(os.path.join(outDir, 'metadata_files')) - - # Surround with try/finally for proper error catching - try: - main(inpDir=inpDir, - csvDir=csvDir, - outDir=outDir, - filePattern=filePattern, - groupVar=groupVar, - percentile=percentile, - removeDirection=removeDirection, - sectionVar=sectionVar, - feature=feature, - padding=padding, - writeOutput=writeOutput) - - except Exception: - traceback.print_exc() - - finally: - logger.info('exiting workflow..') - # Exit the program - sys.exit() \ No newline at end of file diff --git a/clustering/polus-feature-subsetting-plugin/src/requirements.txt b/clustering/polus-feature-subsetting-plugin/src/requirements.txt deleted file mode 100644 index b7e965e..0000000 --- a/clustering/polus-feature-subsetting-plugin/src/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -filepattern>=1.4.5 -pandas>=1.1.3 \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/Dockerfile b/clustering/polus-hdbscan-clustering-plugin/Dockerfile deleted file mode 100644 index 37129b3..0000000 --- a/clustering/polus-hdbscan-clustering-plugin/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM polusai/bfio:2.1.9 - -COPY VERSION / -COPY src ${EXEC_DIR}/. 
- -RUN apt --no-install-recommends -y autoremove --purge python3.9-minimal python3.9\ - && apt-get update && apt-get install --no-install-recommends -y build-essential python3.9-dev\ - && pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir - -ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/VERSION b/clustering/polus-hdbscan-clustering-plugin/VERSION deleted file mode 100644 index 5546bd2..0000000 --- a/clustering/polus-hdbscan-clustering-plugin/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.4.7 \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/build-docker.sh b/clustering/polus-hdbscan-clustering-plugin/build-docker.sh deleted file mode 100755 index 7a7f44f..0000000 --- a/clustering/polus-hdbscan-clustering-plugin/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$( typing.List[str]: - """List all the .csv files in the directory. - - Args: - csv_directory (str): Path to the directory containing the csv files. - - Returns: - The path to directory, list of names of the subdirectories in dirpath (if any) and the filenames of .csv files. - - """ - list_of_files = [os.path.join(dirpath, file_name) - for dirpath, dirnames, files in os.walk(csv_directory) - for file_name in fnmatch.filter(files, '*.csv')] - return list_of_files - - -def clustering(data: np.ndarray, min_cluster_size: int, increment_outlier_id: bool) -> np.ndarray: - """Cluster data using HDBSCAN. - - Args: - data (array): Data that need to be clustered. - min_cluster_size (int): Smallest size grouping that should be considered as a cluster. - increment_outlier_id (bool) : Increment outlier ID to unity. - - Returns: - Cluster labels for each row of data. 
- """ - clusters = hdbscan.HDBSCAN(min_cluster_size=min_cluster_size).fit(data) - labels = clusters.labels_.flatten().astype(np.uint16) + 1 - labels = labels + 1 if increment_outlier_id else labels - - return labels - - -# Setup the argument parsing -def main(inpDir, grouping_pattern, avg_groups, label_col, min_cluster_size, increment_outlier_id, outDir): - # Get list of .csv files in the directory including sub folders for clustering - input_csvs = list_files(inpDir) - if input_csvs is None: - raise ValueError('No .csv files found.') - - for csv in input_csvs: - # Get the full path and split to get only the filename. - split_file = os.path.normpath(csv) - file_name = os.path.split(split_file)[-1] - file_prefix, _ = file_name.split('.', 1) - - logger.info('Reading the file ' + file_name) - - # Read csv file - df = pd.read_csv(csv) - - # If user provided a regular expression. - if grouping_pattern is not None: - df = df[df[label_col].str.match(grouping_pattern)].copy() - if df.empty: - logger.warning(f"Could not find any files matching the pattern {grouping_pattern} in file {csv}. Skipping...") - continue - - #Create a column group with matching string - df['group'] = df[label_col].str.extract(grouping_pattern, expand=True).apply(','.join, axis=1) - - # Get column(s) containing data. - df_data = df.select_dtypes(exclude='object').copy() - df_data['group'] = df['group'] - - # If we want to average features for each group. - if avg_groups: - df_grouped = df_data.groupby('group').apply(lambda x: x.sort_values('group').mean(numeric_only=True)) - - # Cluster data using HDBSCAN clustering. - logger.info('Clustering the data') - cluster_ids = clustering(df_grouped.values, min_cluster_size, increment_outlier_id) - - df_grouped['cluster'] = cluster_ids - df = df.merge(df_grouped['cluster'], left_on='group', right_index=True) - else: # We want separate clustering results for each group. 
- dfs = [] - for group, df_ss in df_data.groupby('group'): - # Cluster data using HDBSCAN clustering. - logger.info(f'Clustering data in group {group}') - - cluster_ids = clustering(df_ss.values, min_cluster_size, increment_outlier_id) - df_ss['cluster'] = cluster_ids - dfs.append(df_ss) - - df_grouped = pd.concat(dfs) - df = df.merge(df_grouped['cluster'], left_index=True, right_index=True) - - # No grouping. Vanilla clustering. - else: - # Get column(s) containing data. - df_data = df.select_dtypes(exclude='object').copy() - - #Cluster data using HDBSCAN clustering - logger.info('Clustering the data') - cluster_ids = clustering(df_data.values, min_cluster_size, increment_outlier_id) - df['cluster'] = cluster_ids - - df.to_csv(os.path.join(outDir, f'{file_prefix}.csv'), index=None, header=True, encoding='utf-8-sig') - logger.info("Finished all processes!") - -if __name__ == "__main__": - logger.info("Parsing arguments...") - parser = argparse.ArgumentParser(prog='main', description='HDBSCAN clustering plugin') - parser.add_argument('--inpDir', dest='inpDir', type=str, - help='Input collection-Data need to be clustered', required=True) - parser.add_argument('--groupingPattern', dest='groupingPattern', type=str, - help='Regular expression to group rows. Clustering will be applied across capture groups.', required=False) - parser.add_argument('--averageGroups', dest='averageGroups', type=str, - help='Whether to average data across groups. Requires capture groups.', default='false', required=False) - parser.add_argument('--labelCol', dest='labelCol', type=str, - help='Name of column containing labels. 
Required only for grouping operations.', required=False) - parser.add_argument('--minClusterSize', dest='minClusterSize', type=int, - help='Minimum cluster size', required=True) - parser.add_argument('--incrementOutlierId', dest='incrementOutlierId', type=str, - help='Increments outlier ID to 1.', default='false', required=False) - parser.add_argument('--outDir', dest='outDir', type=str, - help='Output collection', required=True) - - # Parse the arguments. - args = parser.parse_args() - - # Path to csvfile directory. - inpDir = args.inpDir - logger.info('inpDir = {}'.format(inpDir)) - - # Regular expression for grouping. - grouping_pattern = args.groupingPattern - logger.info('grouping_pattern = {}'.format(grouping_pattern)) - - # Whether to average data for each group. - avg_groups = args.averageGroups.lower() != 'false' - logger.info('avg_groups = {}'.format(avg_groups)) - - # Name of column to use for grouping. - label_col = args.labelCol - logger.info('label_col = {}'.format(label_col)) - - # Minimum cluster size for clustering using HDBSCAN. - min_cluster_size = args.minClusterSize - logger.info('min_cluster_size = {}'.format(min_cluster_size)) - - # Set outlier cluster id as 1. - increment_outlier_id = args.incrementOutlierId.lower() != 'false' - logger.info('increment_outlier_id = {}'.format(increment_outlier_id)) - - # Path to save output csvfiles. 
- outDir = args.outDir - logger.info('outDir = {}'.format(outDir)) - - main( - inpDir, - grouping_pattern, - avg_groups, - label_col, - min_cluster_size, - increment_outlier_id, - outDir - ) \ No newline at end of file diff --git a/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt b/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt deleted file mode 100644 index ffd72e0..0000000 --- a/clustering/polus-hdbscan-clustering-plugin/src/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -hdbscan==0.8.27 -pandas>=1.2.4 diff --git a/features/feature-segmentation-eval-tool/.bumpversion.cfg b/features/feature-segmentation-eval-tool/.bumpversion.cfg index 24647bf..3d5ebbd 100644 --- a/features/feature-segmentation-eval-tool/.bumpversion.cfg +++ b/features/feature-segmentation-eval-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.2.6-dev0 +current_version = 0.2.6-dev1 commit = True tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? @@ -24,4 +24,8 @@ replace = version = "{new_version}" [bumpversion:file:VERSION] -[bumpversion:file:src/polus/images/features/feature_segmentation_eval/__init__.py] +[bumpversion:file:README.md] + +[bumpversion:file:ict.yaml] + +[bumpversion:file:src/polus/tabular/features/feature_segmentation_eval/__init__.py] diff --git a/features/feature-segmentation-eval-tool/Dockerfile b/features/feature-segmentation-eval-tool/Dockerfile index 73d0b9f..c7130f7 100644 --- a/features/feature-segmentation-eval-tool/Dockerfile +++ b/features/feature-segmentation-eval-tool/Dockerfile @@ -1,4 +1,4 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" @@ -16,5 +16,5 @@ COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.features.feature_segmentation_eval"] +ENTRYPOINT ["python3", "-m", "polus.tabular.features.feature_segmentation_eval"] CMD ["--help"] diff --git 
a/features/feature-segmentation-eval-tool/README.md b/features/feature-segmentation-eval-tool/README.md index 9290a70..fca31b5 100644 --- a/features/feature-segmentation-eval-tool/README.md +++ b/features/feature-segmentation-eval-tool/README.md @@ -1,4 +1,4 @@ -# Feature segmentation eval (v0.2.3) +# Feature segmentation eval (v0.2.6-dev1) Plugin to generate evaluation metrics for feature comparison of ground truth and predicted images. Contact [Vishakha Goyal](mailto:vishakha.goyal@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. diff --git a/features/feature-segmentation-eval-tool/VERSION b/features/feature-segmentation-eval-tool/VERSION index fccaf8b..9073550 100644 --- a/features/feature-segmentation-eval-tool/VERSION +++ b/features/feature-segmentation-eval-tool/VERSION @@ -1 +1 @@ -0.2.6-dev0 +0.2.6-dev1 diff --git a/features/feature-segmentation-eval-tool/ict.yaml b/features/feature-segmentation-eval-tool/ict.yaml index dbaeb14..8247062 100644 --- a/features/feature-segmentation-eval-tool/ict.yaml +++ b/features/feature-segmentation-eval-tool/ict.yaml @@ -2,10 +2,10 @@ author: - Vishakha Goyal - Hamdah Shafqat contact: vishakha.goyal@nih.gov -container: polusai/feature-segmentation-eval-tool:0.2.6-dev0 +container: polusai/feature-segmentation-eval-tool:0.2.6-dev1 description: Plugin to generate evaluation metrics for feature comparison of ground truth and predicted images. -entrypoint: python3 -m polus.images.features.feature_segmentation_eval +entrypoint: python3 -m polus.tabular.features.feature_segmentation_eval inputs: - description: Ground truth feature collection to be processed by this plugin. 
format: @@ -80,4 +80,4 @@ ui: key: inputs.preview title: Preview example output of this plugin type: checkbox -version: 0.2.6-dev0 +version: 0.2.6-dev1 diff --git a/features/feature-segmentation-eval-tool/plugin.json b/features/feature-segmentation-eval-tool/plugin.json index 5032daf..4f3f316 100644 --- a/features/feature-segmentation-eval-tool/plugin.json +++ b/features/feature-segmentation-eval-tool/plugin.json @@ -1,18 +1,18 @@ { "name": "Feature Segmentation Eval", - "version": "0.2.6-dev0", + "version": "0.2.6-dev1", "title": "Feature Segmentation Eval", "description": "Plugin to generate evaluation metrics for feature comparison of ground truth and predicted images.", "author": "Vishakha Goyal (vishakha.goyal@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/labshare/polus-plugins", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/feature-segmentation-eval-tool:0.2.6-dev0", + "containerId": "polusai/feature-segmentation-eval-tool:0.2.6-dev1", "baseCommand": [ "python3", "-m", - "polus.images.features.feature_segmentation_eval" + "polus.tabular.features.feature_segmentation_eval" ], "inputs": [ { @@ -91,4 +91,4 @@ "description": "Generate an output preview." 
} ] -} +} \ No newline at end of file diff --git a/features/feature-segmentation-eval-tool/pyproject.toml b/features/feature-segmentation-eval-tool/pyproject.toml index e6452c7..b48e0b1 100644 --- a/features/feature-segmentation-eval-tool/pyproject.toml +++ b/features/feature-segmentation-eval-tool/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "polus-images-features-feature-segmentation-eval" -version = "0.2.6-dev0" +name = "polus-tabular-features-feature-segmentation-eval" +version = "0.2.6-dev1" description = "Feature segmentation eval" authors = [ "Vishakha Goyal ", @@ -17,9 +17,6 @@ scikit-learn="^1.4.0" pandas = "^1.2.4" scipy = "^1.6.2" typer = "^0.7.0" -blake3 = "^0.3.3" -llvmlite = "^0.39.1" -fastapi = "^0.92.0" vaex = "^4.7.0" [tool.poetry.group.dev.dependencies] diff --git a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__init__.py b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__init__.py similarity index 79% rename from features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__init__.py rename to features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__init__.py index b81e997..99aedb4 100644 --- a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__init__.py +++ b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__init__.py @@ -1,4 +1,4 @@ """Feature segmentation evaluation package.""" -__version__ = "0.2.6-dev0" +__version__ = "0.2.6-dev1" from . import feature_evaluation from . 
import metrics diff --git a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__main__.py b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__main__.py similarity index 95% rename from features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__main__.py rename to features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__main__.py index 909ecd5..f1876e1 100644 --- a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/__main__.py +++ b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/__main__.py @@ -7,7 +7,7 @@ from typing import Any import filepattern as fp -import polus.images.features.feature_segmentation_eval.feature_evaluation as fs +import polus.tabular.features.feature_segmentation_eval.feature_evaluation as fs import typer logging.basicConfig( @@ -15,7 +15,7 @@ datefmt="%d-%b-%y %H:%M:%S", ) POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "INFO")) -logger = logging.getLogger("polus.images.features.feature_segmentation_eval") +logger = logging.getLogger("polus.tabular.features.feature_segmentation_eval") logger.setLevel(POLUS_LOG) logging.getLogger("bfio").setLevel(POLUS_LOG) # Set number of threads for scalability diff --git a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/feature_evaluation.py similarity index 100% rename from features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/feature_evaluation.py rename to features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/feature_evaluation.py diff --git 
a/features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py b/features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/metrics.py similarity index 100% rename from features/feature-segmentation-eval-tool/src/polus/images/features/feature_segmentation_eval/metrics.py rename to features/feature-segmentation-eval-tool/src/polus/tabular/features/feature_segmentation_eval/metrics.py diff --git a/features/feature-segmentation-eval-tool/tests/test_cli.py b/features/feature-segmentation-eval-tool/tests/test_cli.py index c92f8ea..e54bcfd 100644 --- a/features/feature-segmentation-eval-tool/tests/test_cli.py +++ b/features/feature-segmentation-eval-tool/tests/test_cli.py @@ -3,7 +3,7 @@ from pathlib import Path from typing import Union -from polus.images.features.feature_segmentation_eval.__main__ import app +from polus.tabular.features.feature_segmentation_eval.__main__ import app from typer.testing import CliRunner diff --git a/features/feature-segmentation-eval-tool/tests/test_feature_single.py b/features/feature-segmentation-eval-tool/tests/test_feature_single.py index 505b8c4..d80376f 100644 --- a/features/feature-segmentation-eval-tool/tests/test_feature_single.py +++ b/features/feature-segmentation-eval-tool/tests/test_feature_single.py @@ -3,7 +3,7 @@ from pathlib import Path from typing import Union -import polus.images.features.feature_segmentation_eval.feature_evaluation as fs +import polus.tabular.features.feature_segmentation_eval.feature_evaluation as fs import pytest import vaex diff --git a/formats/arrow-to-tabular-tool/.bumpversion.cfg b/formats/arrow-to-tabular-tool/.bumpversion.cfg deleted file mode 100644 index 47c6f72..0000000 --- a/formats/arrow-to-tabular-tool/.bumpversion.cfg +++ /dev/null @@ -1,27 +0,0 @@ -[bumpversion] -current_version = 0.2.3-dev0 -commit = True -tag = False -parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
-serialize = - {major}.{minor}.{patch}-{release}{dev} - {major}.{minor}.{patch} - -[bumpversion:part:release] -optional_value = _ -first_value = dev -values = - dev - _ - -[bumpversion:part:dev] - -[bumpversion:file:pyproject.toml] -search = version = "{current_version}" -replace = version = "{new_version}" - -[bumpversion:file:plugin.json] - -[bumpversion:file:VERSION] - -[bumpversion:file:src/polus/images/formats/arrow_to_tabular/__init__.py] diff --git a/formats/arrow-to-tabular-tool/.gitignore b/formats/arrow-to-tabular-tool/.gitignore deleted file mode 100644 index c4aa6d8..0000000 --- a/formats/arrow-to-tabular-tool/.gitignore +++ /dev/null @@ -1,175 +0,0 @@ - #Byte-compiled / optimized / DLL files - __pycache__/ - *.py[cod] - *$py.class - - # C extensions - *.so - - # Distribution / packaging - .Python - build/ - develop-eggs/ - dist/ - downloads/ - eggs/ - .eggs/ - lib/ - lib64/ - parts/ - sdist/ - var/ - wheels/ - share/python-wheels/ - *.egg-info/ - .installed.cfg - *.egg - MANIFEST - - # PyInstaller - # Usually these files are written by a python script from a template - # before PyInstaller builds the exe, so as to inject date/other infos into it. 
- *.manifest - *.spec - - # Installer logs - pip-log.txt - pip-delete-this-directory.txt - - # Unit test / coverage reports - htmlcov/ - .tox/ - .nox/ - .coverage - .coverage.* - .cache - nosetests.xml - coverage.xml - *.cover - *.py,cover - .hypothesis/ - .pytest_cache/ - cover/ - - # Translations - *.mo - *.pot - - # Django stuff: - *.log - local_settings.py - db.sqlite3 - db.sqlite3-journal - - # Flask stuff: - instance/ - .webassets-cache - - # Scrapy stuff: - .scrapy - - # Sphinx documentation - docs/_build/ - - # PyBuilder - .pybuilder/ - target/ - - # Jupyter Notebook - .ipynb_checkpoints - - # IPython - profile_default/ - ipython_config.py - - # pyenv - # For a library or package, you might want to ignore these files since the code is - # intended to run in multiple environments; otherwise, check them in: - # .python-version - - # pipenv - # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. - # However, in case of collaboration, if having platform-specific dependencies or dependencies - # having no cross-platform support, pipenv may install dependencies that don't work, or not - # install all needed dependencies. - #Pipfile.lock - - # poetry - # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. - # This is especially recommended for binary packages to ensure reproducibility, and is more - # commonly ignored for libraries. - # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control - poetry.lock - ../../poetry.lock - - # pdm - # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. - #pdm.lock - # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it - # in version control. - # https://pdm.fming.dev/#use-with-ide - .pdm.toml - - # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm - __pypackages__/ - - # Celery stuff - celerybeat-schedule - celerybeat.pid - - # SageMath parsed files - *.sage.py - - # Environments - .env - .venv - env/ - venv/ - ENV/ - env.bak/ - venv.bak/ - - # Spyder project settings - .spyderproject - .spyproject - - # Rope project settings - .ropeproject - - # mkdocs documentation - /site - - # mypy - .mypy_cache/ - .dmypy.json - dmypy.json - - # Pyre type checker - .pyre/ - - # pytype static type analyzer - .pytype/ - - # Cython debug symbols - cython_debug/ - - # PyCharm - # JetBrains specific template is maintained in a separate JetBrains.gitignore that can - # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore - # and can be added to the global gitignore or merged into this file. For a more nuclear - # option (not recommended) you can uncomment the following to ignore the entire idea folder. - #.idea/ - - # vscode - .vscode - - # test data directory - data - # yaml file - .pre-commit-config.yaml - - # hidden files - .DS_Store - .ds_store - # flake8 - .flake8 diff --git a/formats/arrow-to-tabular-tool/README.md b/formats/arrow-to-tabular-tool/README.md deleted file mode 100644 index 5b9d36e..0000000 --- a/formats/arrow-to-tabular-tool/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# Arrow to Tabular (v0.2.0) -This WIPP plugin allows analysts to convert Arrow Feather File Format (V2) into the following file formats for researchers: \ - - `.parquet` \ - - `.csv` - -Contact [Kelechi Nina Mezu](mailto:nina.mezu@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. - -For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). - -## Building - -To build the Docker image for the conversion plugin, run -`bash build-docker.sh`. - -## Install WIPP Plugin - -If WIPP is running, navigate to the plugins page and add a new plugin. 
Paste the -contents of `plugin.json` into the pop-up window and submit. - -## Options - -This plugin takes two input arguments and one output argument: - -| Name | Description | I/O | Type | -| --------------- | ------------------------------------------------------------ | ------ | ---------- | -| `--inpDir` | Input generic data collection to be processed by this plugin | Input | collection | -| `--fileFormat` | Filename pattern to convert | Input | string | -| `--outDir` | Output collection | Output | collection | -| `--preview` | Generate a JSON file with outputs | Output | JSON | diff --git a/formats/arrow-to-tabular-tool/VERSION b/formats/arrow-to-tabular-tool/VERSION deleted file mode 100644 index 3988334..0000000 --- a/formats/arrow-to-tabular-tool/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.2.3-dev0 diff --git a/formats/arrow-to-tabular-tool/arrowtotabular.cwl b/formats/arrow-to-tabular-tool/arrowtotabular.cwl deleted file mode 100644 index df3754c..0000000 --- a/formats/arrow-to-tabular-tool/arrowtotabular.cwl +++ /dev/null @@ -1,28 +0,0 @@ -class: CommandLineTool -cwlVersion: v1.2 -inputs: - fileFormat: - inputBinding: - prefix: --fileFormat - type: string - inpDir: - inputBinding: - prefix: --inpDir - type: Directory - outDir: - inputBinding: - prefix: --outDir - type: Directory -outputs: - outDir: - outputBinding: - glob: $(inputs.outDir.basename) - type: Directory -requirements: - DockerRequirement: - dockerPull: polusai/arrow-to-tabular-tool:0.2.3-dev0 - InitialWorkDirRequirement: - listing: - - entry: $(inputs.outDir) - writable: true - InlineJavascriptRequirement: {} diff --git a/formats/arrow-to-tabular-tool/build-docker.sh b/formats/arrow-to-tabular-tool/build-docker.sh deleted file mode 100755 index b1ddde0..0000000 --- a/formats/arrow-to-tabular-tool/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$(","Hamdah Shafqat abbasi "] -readme = "README.md" -packages = [{include = "polus", from = "src"}] - -[tool.poetry.dependencies] 
-python = ">=3.9,<3.12" -filepattern = "^2.0.4" -typer = "^0.7.0" -tqdm = "^4.64.1" -blake3 = "^0.3.3" -fcsparser = "^0.2.4" -llvmlite = "^0.39.1" -fastapi = "^0.92.0" -vaex = "^4.7.0" - - -[tool.poetry.group.dev.dependencies] -bump2version = "^1.0.1" -pre-commit = "^3.1.0" -black = "^23.1.0" -flake8 = "^6.0.0" -mypy = "^1.0.1" -pytest = "^7.2.1" -pandas = "^1.5.3" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/formats/arrow-to-tabular-tool/run-plugin.sh b/formats/arrow-to-tabular-tool/run-plugin.sh deleted file mode 100755 index 22f347e..0000000 --- a/formats/arrow-to-tabular-tool/run-plugin.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -#!/bin/bash -version=$( None: - """Execute Main function.""" - logger.info(f"inpDir = {inp_dir}") - logger.info(f"outDir = {out_dir}") - logger.info(f"fileFormat = {file_format}") - - inp_dir = inp_dir.resolve() - out_dir = out_dir.resolve() - - assert inp_dir.exists(), f"{inp_dir} doesnot exists!! Please check input path again" - assert ( - out_dir.exists() - ), f"{out_dir} doesnot exists!! Please check output path again" - FILE_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") - - if file_format == Format.Default: - file_format = FILE_EXT - elif file_format == Format.CSV: - file_format = ".csv" - elif file_format == Format.PARQUET: - file_format = ".parquet" - elif file_format == None: - file_format = FILE_EXT - - assert file_format in [ - ".csv", - ".parquet", - ], f"This tabular file format: {file_format} is not support supported by this plugin!! Choose either CSV or Parquet FileFormat" - - pattern_list = [".feather", ".arrow"] - pattern = [f.suffix for f in inp_dir.iterdir() if f.suffix in pattern_list][0] - assert ( - pattern in pattern_list - ), f"This input file extension {pattern} is not support supported by this plugin!! 
It should be either .feather and .arrow files" - filepattern = {".feather": ".*.feather", ".arrow": ".*.arrow"} - - featherPattern = filepattern[pattern] - - fps = fp.FilePattern(inp_dir, featherPattern) - - if preview: - with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: - out_json: dict[str, Any] = { - "filepattern": featherPattern, - "outDir": [], - } - for file in fps(): - out_name = str(file[1][0].stem) + file_format - out_json["outDir"].append(out_name) - json.dump(out_json, jfile, indent=2) - - with ProcessPoolExecutor(max_workers) as executor: - processes = [] - for files in fps: - file = files[1][0] - processes.append(executor.submit(arrow_tabular, file, file_format, out_dir)) - - for process in tqdm( - as_completed(processes), desc="Arrow --> Tabular", total=len(processes) - ): - process.result() - - logger.info("Finished all processes!") - - -if __name__ == "__main__": - typer.run(main) diff --git a/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py b/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py deleted file mode 100644 index 719f324..0000000 --- a/formats/arrow-to-tabular-tool/src/polus/images/formats/arrow_to_tabular/arrow_to_tabular.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Arrow to Tabular.""" -import logging -import pathlib - -from enum import Enum -import vaex - -logger = logging.getLogger(__name__) - - - -class Format(str, Enum): - """Extension types to be converted.""" - CSV = ".csv" - PARQUET = ".parquet" - Default = "default" - - -def arrow_tabular(file: pathlib.Path, file_format: str, out_dir: pathlib.Path) -> None: - """Convert Arrow file into tabular file. - This plugin uses vaex to open an arrow file and converts into csv or parquet tabular data. - - Args: - file : Path to input file. - file_format : Filepattern of desired tabular output file. - out_dir: Path to output directory. 
- """ - file_name = pathlib.Path(file).stem - logger.info("Arrow Conversion: Copy ${file_name} into outDir for processing...") - - output_file = pathlib.Path(out_dir, (file_name + file_format)) - - logger.info("Arrow Conversion: Converting file into PyArrow Table") - - data = vaex.open(file) - logger.info("Arrow Conversion: table converted") - ncols = len(data) - chunk_size = max([2**24 // ncols, 1]) - - logger.info("Arrow Conversion: checking for file format") - - if file_format == ".csv": - logger.info("Arrow Conversion: Converting PyArrow Table into .csv file") - # Streaming contents of Arrow Table into csv - return data.export_csv(output_file, chunksize=chunk_size) - - elif file_format == ".parquet": - logger.info("Arrow Conversion: Converting PyArrow Table into .parquet file") - return data.export_parquet(output_file) - else: - logger.error( - "Arrow Conversion Error: This format is not supported in this plugin" - ) diff --git a/formats/arrow-to-tabular-tool/tests/__init__.py b/formats/arrow-to-tabular-tool/tests/__init__.py deleted file mode 100644 index d7bcf67..0000000 --- a/formats/arrow-to-tabular-tool/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Arrow to Tabular.""" diff --git a/formats/arrow-to-tabular-tool/tests/test_main.py b/formats/arrow-to-tabular-tool/tests/test_main.py deleted file mode 100644 index 9dd2147..0000000 --- a/formats/arrow-to-tabular-tool/tests/test_main.py +++ /dev/null @@ -1,69 +0,0 @@ -"""Testing of Arrow to Tabular plugin.""" -import os -import pathlib -import random -import string - -import filepattern as fp -import numpy as np -import pandas as pd -import pytest -from polus.images.formats.arrow_to_tabular.arrow_to_tabular import arrow_tabular - - -@pytest.fixture() -def generate_arrow(): - """Create pandas dataframe and convert into to arrow file format.""" - dirpath = os.path.abspath(os.path.join(__file__, "../..")) - inpDir = pathlib.Path(dirpath, "data/input") - outDir = pathlib.Path(dirpath, "data/output") - if not 
inpDir.exists(): - inpDir.mkdir(parents=True, exist_ok=True) - if not outDir.exists(): - outDir.mkdir(exist_ok=True, parents=True) - - df = pd.DataFrame( - { - "A": [random.choice(string.ascii_letters) for i in range(100)], - "B": np.random.randint(low=1, high=100, size=100), - "C": np.random.normal(0.0, 1.0, size=100), - }, - ) - df.to_feather(pathlib.Path(inpDir, "data.arrow")) - df.to_feather(pathlib.Path(inpDir, "data1.arrow")) - - return inpDir, outDir - - -def test_arrow_tabular(generate_arrow): - """Test of Arrow to Parquet file format.""" - pattern = ".parquet" - filePattern = {".csv": ".*.csv", ".parquet": ".*.parquet"} - out_pattern = filePattern[pattern] - in_pattern = ".*.arrow" - fps = fp.FilePattern(generate_arrow[0], in_pattern) - for file in fps(): - arrow_tabular(file[1][0], pattern, generate_arrow[1]) - - assert ( - all( - file[1][0].suffix - for file in fp.FilePattern(generate_arrow[1], out_pattern)() - ) - is True - ) - [os.remove(f) for f in generate_arrow[1].iterdir() if f.name.endswith(pattern)] - - pattern = ".csv" - out_pattern = filePattern[pattern] - fps = fp.FilePattern(generate_arrow[0], in_pattern) - for file in fps(): - arrow_tabular(file[1][0], pattern, generate_arrow[1]) - - assert ( - all( - file[1][0].suffix - for file in fp.FilePattern(generate_arrow[1], out_pattern)() - ) - is True - ) diff --git a/formats/polus-fcs-to-csv-converter-plugin/Dockerfile b/formats/polus-fcs-to-csv-converter-plugin/Dockerfile deleted file mode 100644 index 78be1a4..0000000 --- a/formats/polus-fcs-to-csv-converter-plugin/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM polusai/bfio:2.1.9 - -COPY VERSION ${EXEC_DIR} -COPY src ${EXEC_DIR}/ - -RUN pip3 install -r ${EXEC_DIR}/requirements.txt --no-cache-dir - -ENTRYPOINT ["python3", "/opt/executables/main.py"] \ No newline at end of file diff --git a/formats/polus-fcs-to-csv-converter-plugin/README.md b/formats/polus-fcs-to-csv-converter-plugin/README.md deleted file mode 100644 index fd4dc62..0000000 --- 
a/formats/polus-fcs-to-csv-converter-plugin/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Fcs to Csv file converter - -The fcs to csv file converter plugin converts fcs file to csv file.The input file should be in .fcs file format and output will be .csv file format. - -## Input: -The input should be a file in fcs format. - -## Output: -The output is a csv file. - -For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). - -## Building - -To build the Docker image for the conversion plugin, run -`./build-docker.sh`. - -## Install WIPP Plugin - -If WIPP is running, navigate to the plugins page and add a new plugin. Paste the contents of `plugin.json` into the pop-up window and submit. - -## Options - -This plugin takes eight input argument and one output argument: - -| Name | Description | I/O | Type | -| ---------- | ------------------------- | ------ | ------------- | -| `--inpDir` | Input fcs file collection | Input | collection | -| `--outDir` | Output collection | Output | csvCollection | - - diff --git a/formats/polus-fcs-to-csv-converter-plugin/VERSION b/formats/polus-fcs-to-csv-converter-plugin/VERSION deleted file mode 100644 index 28af839..0000000 --- a/formats/polus-fcs-to-csv-converter-plugin/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.2.5 \ No newline at end of file diff --git a/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh b/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh deleted file mode 100644 index 9a33106..0000000 --- a/formats/polus-fcs-to-csv-converter-plugin/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$(\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
@@ -22,6 +22,10 @@ replace = version = "{new_version}" [bumpversion:file:plugin.json] +[bumpversion:file:README.md] + +[bumpversion:file:ict.yaml] + [bumpversion:file:VERSION] -[bumpversion:file:src/polus/images/formats/tabular_converter/__init__.py] +[bumpversion:file:src/polus/tabular/formats/tabular_converter/__init__.py] diff --git a/formats/tabular-converter-tool/Dockerfile b/formats/tabular-converter-tool/Dockerfile index 3c3fd17..f33ed02 100644 --- a/formats/tabular-converter-tool/Dockerfile +++ b/formats/tabular-converter-tool/Dockerfile @@ -1,4 +1,4 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" @@ -16,5 +16,5 @@ COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.formats.tabular_converter"] +ENTRYPOINT ["python3", "-m", "polus.tabular.formats.tabular_converter"] CMD ["--help"] diff --git a/formats/tabular-converter-tool/README.md b/formats/tabular-converter-tool/README.md index 8f650d4..dc5197f 100644 --- a/formats/tabular-converter-tool/README.md +++ b/formats/tabular-converter-tool/README.md @@ -1,4 +1,4 @@ -# Tabular Converter (v0.1.0) +# Tabular Converter (v0.1.2-dev1) This WIPP plugin allows the tabular data conversion to `arrow` file format and vice versa. Currently this plugins handles only the vaex supported file formats. 
This plugin supports the following file formats which are convertable into `arrow` file format: diff --git a/formats/tabular-converter-tool/VERSION b/formats/tabular-converter-tool/VERSION index 9d8d2c1..12fd03c 100644 --- a/formats/tabular-converter-tool/VERSION +++ b/formats/tabular-converter-tool/VERSION @@ -1 +1 @@ -0.1.2-dev0 +0.1.2-dev1 diff --git a/formats/tabular-converter-tool/ict.yaml b/formats/tabular-converter-tool/ict.yaml index fdfaf9e..9f929bc 100644 --- a/formats/tabular-converter-tool/ict.yaml +++ b/formats/tabular-converter-tool/ict.yaml @@ -2,10 +2,10 @@ author: - Kelechi Nina - Hamdah Shafqat contact: nina.mezu@nih.gov -container: polusai/tabular-converter-tool:0.1.2-dev0 +container: polusai/tabular-converter-tool:0.1.2-dev1 description: WIPP plugin allows tabular data conversion arrow file format and vice versa. -entrypoint: python3 -m polus.images.formats.tabular_converter +entrypoint: python3 -m polus.tabular.formats.tabular_converter inputs: - description: Input data collection to be processed by this plugin format: @@ -33,7 +33,7 @@ outputs: name: outDir required: true type: path -repository: https://github.com/PolusAI/polus-plugins +repository: https://github.com/PolusAI/tabular-tools specVersion: 1.0.0 title: Tabular Converter ui: @@ -58,4 +58,4 @@ ui: key: inputs.fileExtension title: FileExtension type: select -version: 0.1.2-dev0 +version: 0.1.2-dev1 diff --git a/formats/tabular-converter-tool/plugin.json b/formats/tabular-converter-tool/plugin.json index 6a92836..ea9cfbb 100644 --- a/formats/tabular-converter-tool/plugin.json +++ b/formats/tabular-converter-tool/plugin.json @@ -1,18 +1,18 @@ { "name": "Tabular Converter", - "version": "0.1.2-dev0", + "version": "0.1.2-dev1", "title": "Tabular Converter", "description": "WIPP plugin allows tabular data conversion arrow file format and vice versa.", "author": "Kelechi Nina Mezu (nina.mezu@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", "institution": "National Center for 
Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/PolusAI/polus-plugins", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/tabular-converter-tool:0.1.2-dev0", + "containerId": "polusai/tabular-converter-tool:0.1.2-dev1", "baseCommand": [ "python3", "-m", - "polus.images.formats.tabular_converter" + "polus.tabular.formats.tabular_converter" ], "inputs": [ { @@ -72,4 +72,4 @@ "default": ".arrow" } ] -} +} \ No newline at end of file diff --git a/formats/tabular-converter-tool/pyproject.toml b/formats/tabular-converter-tool/pyproject.toml index d0083c3..4b18c74 100644 --- a/formats/tabular-converter-tool/pyproject.toml +++ b/formats/tabular-converter-tool/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "polus-images-formats-tabular-converter" -version = "0.1.2-dev0" +name = "polus-tabular-formats-tabular-converter" +version = "0.1.2-dev1" description = "This plugins allows to convert tabular data conversion into arrow file format and vice versa." 
authors = [ "Kelechi Nina Mezu ", @@ -17,9 +17,6 @@ tqdm = "^4.64.1" pyarrow = "^11.0.0" blake3 = "^0.3.3" fcsparser = "^0.2.4" -llvmlite = "^0.39.1" -fastapi = "^0.92.0" -astropy = "5.2.1" vaex = "^4.7.0" diff --git a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__init__.py b/formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__init__.py similarity index 80% rename from formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__init__.py rename to formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__init__.py index 1c0f1aa..2949f66 100644 --- a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__init__.py +++ b/formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__init__.py @@ -1,5 +1,5 @@ """Tabular Converter.""" -__version__ = "0.1.2-dev0" +__version__ = "0.1.2-dev1" from .tabular_converter import ConvertTabular from .tabular_converter import Extensions diff --git a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__main__.py b/formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__main__.py similarity index 95% rename from formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__main__.py rename to formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__main__.py index 1801d00..452e80f 100644 --- a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/__main__.py +++ b/formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/__main__.py @@ -10,7 +10,7 @@ import typer from tqdm import tqdm -from polus.images.formats.tabular_converter import tabular_converter as tc +from polus.tabular.formats.tabular_converter import tabular_converter as tc app = typer.Typer() # Set number of processors for scalability @@ -21,7 +21,7 @@ format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", 
datefmt="%d-%b-%y %H:%M:%S", ) -logger = logging.getLogger("polus.images.formats.tabular_converter") +logger = logging.getLogger("polus.tabular.formats.tabular_converter") logger.setLevel(logging.INFO) diff --git a/formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py b/formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/tabular_converter.py similarity index 100% rename from formats/tabular-converter-tool/src/polus/images/formats/tabular_converter/tabular_converter.py rename to formats/tabular-converter-tool/src/polus/tabular/formats/tabular_converter/tabular_converter.py diff --git a/formats/tabular-converter-tool/tabularconverter.cwl b/formats/tabular-converter-tool/tabularconverter.cwl index 66d7feb..9185bee 100644 --- a/formats/tabular-converter-tool/tabularconverter.cwl +++ b/formats/tabular-converter-tool/tabularconverter.cwl @@ -24,7 +24,7 @@ outputs: type: Directory requirements: DockerRequirement: - dockerPull: polusai/tabular-converter-tool:0.1.2-dev0 + dockerPull: polusai/tabular-converter-tool:0.1.2-dev1 InitialWorkDirRequirement: listing: - entry: $(inputs.outDir) diff --git a/formats/tabular-converter-tool/tests/test_main.py b/formats/tabular-converter-tool/tests/test_main.py index b512a83..67518ba 100644 --- a/formats/tabular-converter-tool/tests/test_main.py +++ b/formats/tabular-converter-tool/tests/test_main.py @@ -12,7 +12,7 @@ import pytest import vaex from astropy.table import Table -from polus.images.formats.tabular_converter import tabular_converter as tc +from polus.tabular.formats.tabular_converter import tabular_converter as tc class Generatedata: diff --git a/formats/tabular-to-arrow-tool/.gitignore b/formats/tabular-to-arrow-tool/.gitignore deleted file mode 100644 index e891280..0000000 --- a/formats/tabular-to-arrow-tool/.gitignore +++ /dev/null @@ -1,175 +0,0 @@ -# Byte-compiled / optimized / DLL files -__pycache__/ -*.py[cod] -*$py.class - -# C extensions -*.so - -# 
Distribution / packaging -.Python -build/ -develop-eggs/ -dist/ -downloads/ -eggs/ -.eggs/ -lib/ -lib64/ -parts/ -sdist/ -var/ -wheels/ -share/python-wheels/ -*.egg-info/ -.installed.cfg -*.egg -MANIFEST - -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. -*.manifest -*.spec - -# Installer logs -pip-log.txt -pip-delete-this-directory.txt - -# Unit test / coverage reports -htmlcov/ -.tox/ -.nox/ -.coverage -.coverage.* -.cache -nosetests.xml -coverage.xml -*.cover -*.py,cover -.hypothesis/ -.pytest_cache/ -cover/ - -# Translations -*.mo -*.pot - -# Django stuff: -*.log -local_settings.py -db.sqlite3 -db.sqlite3-journal - -# Flask stuff: -instance/ -.webassets-cache - -# Scrapy stuff: -.scrapy - -# Sphinx documentation -docs/_build/ - -# PyBuilder -.pybuilder/ -target/ - -# Jupyter Notebook -.ipynb_checkpoints - -# IPython -profile_default/ -ipython_config.py - -# pyenv -# For a library or package, you might want to ignore these files since the code is -# intended to run in multiple environments; otherwise, check them in: -# .python-version - -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. -#Pipfile.lock - -# poetry -# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. -# This is especially recommended for binary packages to ensure reproducibility, and is more -# commonly ignored for libraries. -# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control -poetry.lock -../../poetry.lock - -# pdm -# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 
-#pdm.lock -# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it -# in version control. -# https://pdm.fming.dev/#use-with-ide -.pdm.toml - -# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm -__pypackages__/ - -# Celery stuff -celerybeat-schedule -celerybeat.pid - -# SageMath parsed files -*.sage.py - -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ - -# Spyder project settings -.spyderproject -.spyproject - -# Rope project settings -.ropeproject - -# mkdocs documentation -/site - -# mypy -.mypy_cache/ -.dmypy.json -dmypy.json - -# Pyre type checker -.pyre/ - -# pytype static type analyzer -.pytype/ - -# Cython debug symbols -cython_debug/ - -# PyCharm -# JetBrains specific template is maintained in a separate JetBrains.gitignore that can -# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore -# and can be added to the global gitignore or merged into this file. For a more nuclear -# option (not recommended) you can uncomment the following to ignore the entire idea folder. 
-#.idea/ - -# vscode -.vscode - -# test data directory -data -# yaml file -.pre-commit-config.yaml - -# hidden files -.DS_Store -.ds_store -# flake8 -.flake8 diff --git a/formats/tabular-to-arrow-tool/Dockerfile b/formats/tabular-to-arrow-tool/Dockerfile deleted file mode 100644 index aab61fb..0000000 --- a/formats/tabular-to-arrow-tool/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -FROM polusai/bfio:2.1.9 - -# environment variables defined in polusai/bfio -ENV EXEC_DIR="/opt/executables" -ENV POLUS_IMG_EXT=".ome.tif" -ENV POLUS_TAB_EXT=".arrow" - -# Work directory defined in the base container -WORKDIR ${EXEC_DIR} - -COPY pyproject.toml ${EXEC_DIR} -COPY VERSION ${EXEC_DIR} -COPY README.md ${EXEC_DIR} -RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 -COPY src ${EXEC_DIR}/src - -RUN pip3 install ${EXEC_DIR} --no-cache-dir - -ENTRYPOINT ["python3", "-m", "polus.images.formats.tabular_to_arrow"] -CMD ["--help"] diff --git a/formats/tabular-to-arrow-tool/README.md b/formats/tabular-to-arrow-tool/README.md deleted file mode 100644 index 888ad67..0000000 --- a/formats/tabular-to-arrow-tool/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# Tabular to Arrow (v0.2.0) - -This WIPP plugin allows analysts to convert various file formats received by researchers into Arrow Feather File Format (V2). This plugin supports the following file extensions: -- `fcs` -- `csv` -- `hdf5` -- `fits` -- `parquet` -- `feather` - -Contact [Kelechi Nina Mezu](mailto:nina.mezu@nih.gov), [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. - -For more information on WIPP, visit the [official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). - -## Building - -To build the Docker image for the conversion plugin, run -`bash build-docker.sh`. - -## Install WIPP Plugin - -If WIPP is running, navigate to the plugins page and add a new plugin. Paste the -contents of `plugin.json` into the pop-up window and submit. 
- -## Options - -This plugin takes two input arguments and one output argument: - -| Name | Description | I/O | Type | -|---------------|-------------------------|--------|--------| -| `--inpDir` | Input generic data collection to be processed by this plugin | Input | collection | -| `--filePattern` | Filename pattern to convert | Input | string | -| `--outDir` | Output collection | Output | collection | -| `--preview` | Generate JSON file with outputs | Output | JSON | diff --git a/formats/tabular-to-arrow-tool/VERSION b/formats/tabular-to-arrow-tool/VERSION deleted file mode 100644 index 3988334..0000000 --- a/formats/tabular-to-arrow-tool/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.2.3-dev0 diff --git a/formats/tabular-to-arrow-tool/build-docker.sh b/formats/tabular-to-arrow-tool/build-docker.sh deleted file mode 100755 index 194e843..0000000 --- a/formats/tabular-to-arrow-tool/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$(","hamshkhawar "] -readme = "README.md" -packages = [{include = "polus", from = "src"}] - -[tool.poetry.dependencies] -python = ">=3.9,<3.12" -filepattern = "^2.0.4" -typer = "^0.7.0" -tqdm = "^4.64.1" -pyarrow = "^11.0.0" -blake3 = "^0.3.3" -fcsparser = "^0.2.4" -llvmlite = "^0.39.1" -fastapi = "^0.92.0" -vaex = "^4.7.0" - - -[tool.poetry.group.dev.dependencies] -bump2version = "^1.0.1" -pre-commit = "^3.1.0" -black = "^23.1.0" -flake8 = "^6.0.0" -mypy = "^1.0.1" -pytest = "^7.2.1" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/formats/tabular-to-arrow-tool/run-plugin.sh b/formats/tabular-to-arrow-tool/run-plugin.sh deleted file mode 100755 index 6dc39b7..0000000 --- a/formats/tabular-to-arrow-tool/run-plugin.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -#!/bin/bash -version=$( None: - """Execute Main function.""" - logger.info(f"inpDir = {inp_dir}") - logger.info(f"outDir = {out_dir}") - logger.info(f"filePattern = {file_pattern}") - - assert inp_dir.exists(), 
f"{inp_dir} doesnot exist!! Please check input path again" - assert out_dir.exists(), f"{out_dir} doesnot exist!! Please check output path again" - - if file_pattern is None: - file_pattern = ".*" - else: - file_pattern = "".join([".*", file_pattern]) - - fps = fp.FilePattern(inp_dir, file_pattern) - - if preview: - with open(pathlib.Path(out_dir, "preview.json"), "w") as jfile: - out_json: dict[str, Any] = { - "filepattern": file_pattern, - "outDir": [], - } - for file in fps: - out_name = str(file[1][0].stem) + POLUS_TAB_EXT - out_json["outDir"].append(out_name) - json.dump(out_json, jfile, indent=2) - - processes = [] - with ProcessPoolExecutor(max_workers) as executor: - for files in fps: - file = files[1][0] - if file_pattern == ".*.fcs": - processes.append(executor.submit(tb.fcs_to_arrow, file, out_dir)) - else: - processes.append( - executor.submit(tb.df_to_arrow, file, file_pattern, out_dir) - ) - - for f in tqdm( - as_completed(processes), - desc=f"converting tabular data to {POLUS_TAB_EXT}", - total=len(processes), - ): - f.result() - - tb.remove_files(out_dir) - - logger.info("Finished all processes!") - - -if __name__ == "__main__": - app() diff --git a/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py b/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py deleted file mode 100644 index 1dfd452..0000000 --- a/formats/tabular-to-arrow-tool/src/polus/images/formats/tabular_to_arrow/tabular_arrow_converter.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Tabular to Arrow.""" -import logging -import os -import pathlib - -import fcsparser -import vaex - -logger = logging.getLogger(__name__) - -POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow") - - -def csv_to_df(file: pathlib.Path, out_dir: pathlib.Path) -> vaex.DataFrame: - """Convert csv into datafram or hdf5 file. - - Args: - file: Path to input file. - out_dir: Path to save the output csv file. 
- - Returns: - Vaex dataframe - - """ - logger.info("csv_to_df: Copy csv file into out_dir for processing...") - - logger.info("csv_to_df: Checking size of csv file...") - # Open csv file and count rows in file - with open(file, encoding="utf-8") as fr: - ncols = len(fr.readline().split(",")) - - chunk_size = max([2**24 // ncols, 1]) - logger.info("csv_to_df: # of columns are: " + str(ncols)) - - # Convert large csv files to hdf5 if more than 1,000,000 rows - logger.info("csv_to_df: converting file into hdf5 format") - df = vaex.from_csv(file, convert=True, chunk_size=chunk_size) - - return df - - -def binary_to_df(file: pathlib.Path, file_pattern: str) -> vaex.DataFrame: - """Convert any binary formats into vaex dataframe. - - Args: - file: Path to input file. - file_pattern : extension of file to convert. - - Returns: - Vaex dataframe. - Raises: - FileNotFoundError: An error occurred if input directory contains file extensions which are not supported by this plugin. - - """ - binary_patterns = [".*.fits", ".*.feather", ".*.parquet", ".*.hdf5", ".*.h5"] - - logger.info("binary_to_df: Scanning directory for binary file pattern... ") - if file_pattern in binary_patterns: - # convert hdf5 to vaex df - df = vaex.open(file) - return df - else: - raise FileNotFoundError( - "No supported binary file extensions were found in the directory. Please check file directory again." - ) - - -def fcs_to_arrow(file: pathlib.Path, out_dir: pathlib.Path) -> None: - """Convert fcs file to csv. Copied from polus-fcs-to-csv-converter plugin. - - Args: - file: Path to the directory containing the fcs file. - out_dir: Path to save the output csv file. 
- - """ - file_name = file.stem - outname = file_name + POLUS_TAB_EXT - outputfile = out_dir.joinpath(outname) - logger.info("fcs_to_feather : Begin parsing data out of .fcs file" + file_name) - - # Use fcsparser to parse data into python dataframe - _, data = fcsparser.parse(file, meta_data_only=False, reformat_meta=True) - - # Export the fcs data to vaex df - logger.info("fcs_to_feather: converting data to vaex dataframe...") - df = vaex.from_pandas(data) - logger.info("fcs_to_feather: writing file...") - logger.info( - "fcs_to_feather: Writing Vaex Dataframe to Feather File Format for:" + file_name - ) - df.export_feather(outputfile) - - -def df_to_arrow(file: pathlib.Path, file_pattern: str, out_dir: pathlib.Path) -> None: - """Convert vaex dataframe to Arrow feather file. - - Args: - file: Path to the directory to grab file. - file_pattern: File extension. - out_dir: Path to the directory to save feather file. - """ - file_name = file.stem - outname = file_name + POLUS_TAB_EXT - outputfile = out_dir.joinpath(outname) - - logger.info("df_to_feather: Scanning input directory files... ") - if file_pattern == ".*.csv": - # convert csv to vaex df or hdf5 - df = csv_to_df(file, out_dir) - else: - df = binary_to_df(file, file_pattern) - - logger.info("df_to_arrow: writing file...") - logger.info( - "df_to_arrow: Writing Vaex Dataframe to Feather File Format for:" + file_name - ) - df.export_feather(outputfile) - - -def remove_files(out_dir: pathlib.Path) -> None: - """Delete intermediate files other than arrow and json files from output directory. - - Args: - out_dir: Path to the output directory. 
- - """ - for f in out_dir.iterdir(): - if f.suffix not in [".arrow", ".json"]: - os.remove(f) - - logger.info("Done") diff --git a/formats/tabular-to-arrow-tool/tests/__init__.py b/formats/tabular-to-arrow-tool/tests/__init__.py deleted file mode 100644 index 04f992e..0000000 --- a/formats/tabular-to-arrow-tool/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Pytests of Tabular to Arrow plugin.""" diff --git a/formats/tabular-to-arrow-tool/tests/test_main.py b/formats/tabular-to-arrow-tool/tests/test_main.py deleted file mode 100644 index b2ca218..0000000 --- a/formats/tabular-to-arrow-tool/tests/test_main.py +++ /dev/null @@ -1,138 +0,0 @@ -"""Testing of Tabular to Arrow plugin.""" -import os -import pathlib -import random -import shutil -import string -import typing - -import fcsparser -import filepattern as fp -import numpy as np -import pandas as pd -import pytest -import vaex -from astropy.table import Table -from polus.images.formats.tabular_to_arrow import tabular_arrow_converter as tb - - -class Generatedata: - """Generate tabular data with several different file format.""" - - def __init__(self, file_pattern: str) -> None: - """Define instance attributes.""" - self.dirpath = os.path.abspath(os.path.join(__file__, "../..")) - self.inp_dir = pathlib.Path(self.dirpath, "data/input") - if not self.inp_dir.exists(): - self.inp_dir.mkdir(exist_ok=True, parents=True) - self.out_dir = pathlib.Path(self.dirpath, "data/output") - if not self.out_dir.exists(): - self.out_dir.mkdir(exist_ok=True, parents=True) - self.file_pattern = file_pattern - self.x = self.create_dataframe() - - def get_inp_dir(self) -> typing.Union[str, os.PathLike]: - """Get input directory.""" - return self.inp_dir - - def get_out_dir(self) -> typing.Union[str, os.PathLike]: - """Get output directory.""" - return self.out_dir - - def create_dataframe(self) -> pd.core.frame.DataFrame: - """Create Pandas dataframe.""" - return pd.DataFrame( - { - "A": [random.choice(string.ascii_letters) for i 
in range(100)], - "B": np.random.randint(low=1, high=100, size=100), - "C": np.random.normal(0.0, 1.0, size=100), - }, - ) - - def fits_func(self) -> None: - """Convert pandas dataframe to fits file format.""" - ft = Table.from_pandas(self.x) - ft.write(pathlib.Path(self.inp_dir, "data.fits"), overwrite=True) - - def fcs_func(self) -> None: - """Get the test example of fcs data.""" - fpath = fcsparser.test_sample_path - shutil.copy(fpath, self.inp_dir) - - def csv_func(self) -> None: - """Convert pandas dataframe to csv file format.""" - self.x.to_csv(pathlib.Path(self.inp_dir, "data.csv"), index=False) - - def parquet_func(self) -> None: - """Convert pandas dataframe to parquet file format.""" - self.x.to_parquet( - pathlib.Path(self.inp_dir, "data.parquet"), - engine="auto", - compression=None, - ) - - def feather_func(self) -> None: - """Convert pandas dataframe to feather file format.""" - self.x.to_feather(pathlib.Path(self.inp_dir, "data.feather")) - - def hdf_func(self) -> None: - """Convert pandas dataframe to hdf5 file format.""" - v_df = vaex.from_pandas(self.x, copy_index=False) - v_df.export(pathlib.Path(self.inp_dir, "data.hdf5")) - - def __call__(self) -> None: - """To make a class callable.""" - data_ext = { - ".hdf5": self.hdf_func, - ".csv": self.csv_func, - ".parquet": self.parquet_func, - ".feather": self.feather_func, - ".fits": self.fits_func, - ".fcs": self.fcs_func, - } - - return data_ext[self.file_pattern]() - - -FILE_EXT = [[".hdf5", ".parquet", ".csv", ".feather", ".fits", ".fcs"]] - - -@pytest.fixture(params=FILE_EXT) -def poly(request): - """To get the parameter of the fixture.""" - return request.param - - -def test_tabular_to_arrow(poly): - """Testing of tabular data conversion to arrow file format.""" - for i in poly: - if i != ".fcs": - d = Generatedata(i) - d() - file_pattern = f".*{i}" - fps = fp.FilePattern(d.get_inp_dir(), file_pattern) - for file in fps(): - tb.df_to_arrow(file[1][0], file_pattern, d.get_out_dir()) - - assert ( 
- all( - file[1][0].suffix - for file in fp.FilePattern(d.get_out_dir(), ".arrow") - ) - is True - ) - else: - d = Generatedata(".fcs") - d() - file_pattern = ".*.fcs" - fps = fp.FilePattern(d.get_out_dir(), file_pattern) - for file in fps(): - tb.fcs_to_arrow(file[1][0], d.get_out_dir()) - - assert ( - all( - file[1][0].suffix - for file in fp.FilePattern(d.get_out_dir(), ".arrow") - ) - is True - ) diff --git a/package.json b/package.json index 5c80de7..78175fd 100644 --- a/package.json +++ b/package.json @@ -1,17 +1,17 @@ { "name": "@polusai/tabular-tools", - "version": "0.1.0", + "version": "0.1.1", "description": "Monorepo for generic WIPP plugins", "scripts": {}, "repository": { "type": "git", - "url": "git+https://github.com/polusai/tabular-tools.git" + "url": "git+https://github.com/polusAI/tabular-tools.git" }, "license": "MIT", "bugs": { - "url": "https://github.com/polusai/tabular-tools/issues" + "url": "https://github.com/polusAI/tabular-tools/issues" }, - "homepage": "https://github.com/polusai/tabular-tools#readme", + "homepage": "https://github.com/polusAI/tabular-tools#readme", "devDependencies": { "@commitlint/cli": "^8.2.0", "@commitlint/config-conventional": "^8.2.0", diff --git a/pyproject.toml b/pyproject.toml index e05d93b..2b40a69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,10 +3,10 @@ authors = ["Nicholas Schaub ", "Camilo Velez "] -name = "polus-plugins" +name = "polus-tabular" packages = [{include = "polus", from = "src"}] readme = "README.md" -repository = "https://github.com/polusai/tabular-tools" +repository = "https://github.com/PolusAI/tabular-tools" version = "0.1.1" [tool.poetry.dependencies] diff --git a/ruff.toml b/ruff.toml index 20707d6..bf5e426 100644 --- a/ruff.toml +++ b/ruff.toml @@ -41,13 +41,15 @@ max-complexity = 12 [pydocstyle] convention = "google" -# Ignore `F401` (unused import violations) in all `__init__.py` files. 
def __getattr__(name: str) -> Union[Plugin, ComputePlugin, list]:
    """Resolve dynamic module attributes.

    Exposes ``polus.tabular.list`` (all local plugins), any locally
    installed plugin by name, and the package version, as if they were
    ordinary module attributes.
    """
    if name == "list":
        return list_plugins()
    if name in list_plugins():
        return get_plugin(name)
    if name in ("__version__", "VERSION"):
        return VERSION
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
polus.tabular._plugins.classes.plugin_classes import remove_all +from polus.tabular._plugins.classes.plugin_classes import remove_plugin +from polus.tabular._plugins.classes.plugin_classes import submit_plugin + +__all__ = [ + "Plugin", + "ComputePlugin", + "submit_plugin", + "get_plugin", + "refresh", + "list_plugins", + "remove_plugin", + "remove_all", + "load_config", + "_load_plugin", + "PLUGINS", +] diff --git a/src/polus/tabular/_plugins/classes/plugin_base.py b/src/polus/tabular/_plugins/classes/plugin_base.py new file mode 100644 index 0000000..af22db4 --- /dev/null +++ b/src/polus/tabular/_plugins/classes/plugin_base.py @@ -0,0 +1,311 @@ +"""Methods for all plugin objects.""" +# pylint: disable=W1203, W0212, enable=W1201 +import enum +import json +import logging +import random +import signal +from pathlib import Path +from typing import Any +from typing import Optional +from typing import TypeVar +from typing import Union + +import fsspec +import yaml # type: ignore +from cwltool.context import RuntimeContext +from cwltool.factory import Factory +from cwltool.utils import CWLObjectType +from polus.tabular._plugins.cwl import CWL_BASE_DICT +from polus.tabular._plugins.io import input_to_cwl +from polus.tabular._plugins.io import io_to_yml +from polus.tabular._plugins.io import output_to_cwl +from polus.tabular._plugins.io import outputs_cwl +from polus.tabular._plugins.utils import name_cleaner +from python_on_whales import docker + +logger = logging.getLogger("polus.tabular") + +StrPath = TypeVar("StrPath", str, Path) + + +class IOKeyError(Exception): + """Raised when trying to set invalid I/O parameter.""" + + +class MissingInputValuesError(Exception): + """Raised when there are required input values that have not been set.""" + + +class BasePlugin: + """Base Class for Plugins.""" + + def _check_inputs(self) -> None: + """Check if all required inputs have been set.""" + _in = [x for x in self.inputs if x.required and not x.value] # type: ignore + if 
len(_in) > 0: + msg = f"{[x.name for x in _in]} are required inputs but have not been set" + raise MissingInputValuesError( + msg, # type: ignore + ) + + @property + def organization(self) -> str: + """Plugin container's organization.""" + return self.containerId.split("/")[0] + + def load_config(self, path: StrPath) -> None: + """Load configured plugin from file.""" + with Path(path).open(encoding="utf=8") as fw: + config = json.load(fw) + inp = config["inputs"] + out = config["outputs"] + for k, v in inp.items(): + if k in self._io_keys: + setattr(self, k, v) + for k, v in out.items(): + if k in self._io_keys: + setattr(self, k, v) + logger.debug(f"Loaded config from {path}") + + def run( + self, + gpus: Union[None, str, int] = "all", + **kwargs: Union[None, str, int], + ) -> None: + """Run plugin in Docker container.""" + self._check_inputs() + inp_dirs = [x for x in self.inputs if isinstance(x.value, Path)] + out_dirs = [x for x in self.outputs if isinstance(x.value, Path)] + + inp_dirs_dict = {x: f"/data/inputs/input{n}" for (n, x) in enumerate(inp_dirs)} + out_dirs_dict = { + x: f"/data/outputs/output{n}" for (n, x) in enumerate(out_dirs) + } + + mnts_in = [ + [f"type=bind,source={k},target={v},readonly"] # must be a list of lists + for (k, v) in inp_dirs_dict.items() + ] + mnts_out = [ + [f"type=bind,source={k},target={v}"] # must be a list of lists + for (k, v) in out_dirs_dict.items() + ] + + mnts = mnts_in + mnts_out + args = [] + + for i in self.inputs: + if i.value is not None: # do not include those with value=None + i._validate() + args.append(f"--{i.name}") + + if isinstance(i.value, Path): + args.append(inp_dirs_dict[str(i.value)]) + + elif isinstance(i.value, enum.Enum): + args.append(str(i.value._name_)) + + else: + args.append(str(i.value)) + + for o in self.outputs: + if o.value is not None: # do not include those with value=None + o._validate() + args.append(f"--{o.name}") + + if isinstance(o.value, Path): + 
args.append(out_dirs_dict[str(o.value)]) + + elif isinstance(o.value, enum.Enum): + args.append(str(o.value._name_)) + + else: + args.append(str(o.value)) + + random_int = random.randint(10, 99) # noqa: S311 # only for naming + container_name = f"polus{random_int}" + + def sig( + signal, # noqa # pylint: disable=W0613, W0621 + frame, # noqa # pylint: disable=W0613, W0621 + ) -> None: # signal handler to kill container when KeyboardInterrupt + logger.info(f"Exiting container {container_name}") + docker.kill(container_name) + + signal.signal( + signal.SIGINT, + sig, + ) # make of sig the handler for KeyboardInterrupt + if gpus is None: + logger.info( + f"""Running container without GPU. {self.__class__.__name__} + version {self.version!s}""", + ) + docker_ = docker.run( + self.containerId, + args, + name=container_name, + remove=True, + mounts=mnts, + **kwargs, + ) + print(docker_) # noqa + else: + logger.info( + f"""Running container with GPU: --gpus {gpus}. + {self.__class__.__name__} version {self.version!s}""", + ) + docker_ = docker.run( + self.containerId, + args, + gpus=gpus, + name=container_name, + remove=True, + mounts=mnts, + **kwargs, + ) + print(docker_) # noqa + + @property + def manifest(self) -> dict: + """Plugin manifest.""" + manifest_ = json.loads(self.json(exclude={"_io_keys", "versions", "id"})) + manifest_["version"] = manifest_["version"]["version"] + return manifest_ + + def __getattribute__(self, name: str) -> Any: # noqa + if name == "__class__": # pydantic v2 change + return super().__getattribute__(name) + if name != "_io_keys" and hasattr(self, "_io_keys") and name in self._io_keys: + value = self._io_keys[name].value + if isinstance(value, enum.Enum): + value = value.name + return value + + return super().__getattribute__(name) + + def __setattr__(self, name: str, value: Any) -> None: # noqa + if name == "_fs": + if not issubclass(type(value), fsspec.spec.AbstractFileSystem): + msg = "_fs must be an fsspec FileSystem" + raise 
ValueError(msg) + for i in self.inputs: + i._fs = value + for o in self.outputs: + o._fs = value + return + + if name != "_io_keys" and hasattr(self, "_io_keys"): + if name in self._io_keys: + logger.debug( + f"Value of {name} in {self.__class__.__name__} set to {value}", + ) + self._io_keys[name].value = value + return + msg = ( + f"Attempting to set {name} in " + "{self.__class__.__name__} but " + "{{name}} is not a valid I/O parameter" + ) + raise IOKeyError( + msg, + ) + + super().__setattr__(name, value) + + def _to_cwl(self) -> dict: + """Return CWL yml as dict.""" + cwl_dict = CWL_BASE_DICT + cwl_dict["inputs"] = {} + cwl_dict["outputs"] = {} + inputs = [input_to_cwl(x) for x in self.inputs] + inputs = inputs + [output_to_cwl(x) for x in self.outputs] + for inp in inputs: + cwl_dict["inputs"].update(inp) + outputs = [outputs_cwl(x) for x in self.outputs] + for out in outputs: + cwl_dict["outputs"].update(out) + cwl_dict["requirements"]["DockerRequirement"]["dockerPull"] = self.containerId + return cwl_dict + + def save_cwl(self, path: StrPath) -> Path: + """Save plugin as CWL command line tool.""" + if str(path).rsplit(".", maxsplit=1)[-1] != "cwl": + msg = "path must end in .cwl" + raise ValueError(msg) + with Path(path).open("w", encoding="utf-8") as file: + yaml.dump(self._to_cwl(), file) + return Path(path) + + @property + def _cwl_io(self) -> dict: + """Dict of I/O for CWL.""" + return { + x.name: io_to_yml(x) for x in self._io_keys.values() if x.value is not None + } + + def save_cwl_io(self, path: StrPath) -> Path: + """Save plugin's I/O values to yml file. + + To be used with CWL Command Line Tool. 
+ """ + self._check_inputs() + if str(path).rsplit(".", maxsplit=1)[-1] != "yml": + msg = "path must end in .yml" + raise ValueError(msg) + with Path(path).open("w", encoding="utf-8") as file: + yaml.dump(self._cwl_io, file) + return Path(path) + + def run_cwl( + self, + cwl_path: Optional[StrPath] = None, + io_path: Optional[StrPath] = None, + ) -> Union[CWLObjectType, str, None]: + """Run configured plugin in CWL. + + Run plugin as a CWL command line tool after setting I/O values. + Two files will be generated: a CWL (`.cwl`) command line tool + and an I/O file (`.yml`). They will be generated in + current working directory if no paths are specified. Optional paths + for these files can be specified with arguments `cwl_path`, + and `io_path` respectively. + + Args: + cwl_path: [Optional] target path for `.cwl` file + io_path: [Optional] target path for `.yml` file + + """ + if not self.outDir: + msg = "" + raise ValueError(msg) + + if not cwl_path: + _p = Path.cwd().joinpath(name_cleaner(self.name) + ".cwl") + _cwl = self.save_cwl(_p) + else: + _cwl = self.save_cwl(cwl_path) + + if not io_path: + _p = Path.cwd().joinpath(name_cleaner(self.name) + ".yml") + self.save_cwl_io(_p) # saves io to make it visible to user + else: + self.save_cwl_io(io_path) # saves io to make it visible to user + + outdir_path = self.outDir.parent.relative_to(Path.cwd()) + r_c = RuntimeContext({"outdir": str(outdir_path)}) + fac = Factory(runtime_context=r_c) + cwl = fac.make(str(_cwl)) + return cwl(**self._cwl_io) # object's io dict is used instead of .yml file + + def __lt__(self, other: "BasePlugin") -> bool: + return self.version < other.version + + def __gt__(self, other: "BasePlugin") -> bool: + return other.version < self.version + + def __repr__(self) -> str: + return ( + f"{self.__class__.__name__}(name='{self.name}', version={self.version!s})" + ) diff --git a/src/polus/tabular/_plugins/classes/plugin_classes.py b/src/polus/tabular/_plugins/classes/plugin_classes.py new file 
mode 100644 index 0000000..68e3e9b --- /dev/null +++ b/src/polus/tabular/_plugins/classes/plugin_classes.py @@ -0,0 +1,472 @@ +"""Classes for Plugin objects containing methods to configure, run, and save.""" +# pylint: disable=W1203, W0212, enable=W1201 +import json +import logging +import shutil +import uuid +from copy import deepcopy +from pathlib import Path +from typing import Any +from typing import Optional +from typing import Union + +from polus.tabular._plugins._compat import PYDANTIC_V2 +from polus.tabular._plugins.classes.plugin_base import BasePlugin +from polus.tabular._plugins.io._io import DuplicateVersionFoundError +from polus.tabular._plugins.io._io import Version +from polus.tabular._plugins.io._io import _in_old_to_new +from polus.tabular._plugins.io._io import _ui_old_to_new +from polus.tabular._plugins.manifests import InvalidManifestError +from polus.tabular._plugins.manifests import _load_manifest +from polus.tabular._plugins.manifests import validate_manifest +from polus.tabular._plugins.models import ComputeSchema +from polus.tabular._plugins.models import PluginUIInput +from polus.tabular._plugins.models import PluginUIOutput +from polus.tabular._plugins.models import WIPPPluginManifest +from polus.tabular._plugins.utils import cast_version +from polus.tabular._plugins.utils import name_cleaner +from pydantic import ConfigDict + +logger = logging.getLogger("polus.tabular") +PLUGINS: dict[str, dict] = {} +# PLUGINS = {"BasicFlatfieldCorrectionPlugin": +# {Version('0.1.4'): Path(<...>), Version('0.1.5'): Path(<...>)}. 
+# "VectorToLabel": {Version(...)}} + +""" +Paths and Fields +""" +# Location to store any discovered plugin manifests +_PLUGIN_DIR = Path(__file__).parent.parent.joinpath("manifests") + + +def refresh() -> None: + """Refresh the plugin list.""" + organizations = [ + x for x in _PLUGIN_DIR.iterdir() if x.name != "__pycache__" and x.is_dir() + ] # ignore __pycache__ + + PLUGINS.clear() + + for org in organizations: + for file in org.iterdir(): + if file.suffix == ".py": + continue + + try: + plugin = validate_manifest(file) + except InvalidManifestError: + logger.warning(f"Validation error in {file!s}") + except BaseException as exc: # pylint: disable=W0718 + logger.warning(f"Unexpected error {exc} with {file!s}") + raise exc + + else: + key = name_cleaner(plugin.name) + # Add version and path to VERSIONS + if key not in PLUGINS: + PLUGINS[key] = {} + if ( + plugin.version in PLUGINS[key] + and file != PLUGINS[key][plugin.version] + ): + msg = ( + "Found duplicate version of plugin" + f"{plugin.name} in {_PLUGIN_DIR}" + ) + raise DuplicateVersionFoundError( + msg, + ) + PLUGINS[key][plugin.version] = file + + +def list_plugins() -> list: + """List all local plugins.""" + output = list(PLUGINS.keys()) + output.sort() + return output + + +def _get_config(plugin: Union["Plugin", "ComputePlugin"], class_: str) -> dict: + if PYDANTIC_V2: + model_ = json.loads(plugin.model_dump_json()) + model_["_io_keys"] = deepcopy(plugin._io_keys) # type: ignore + else: + # ignore mypy if pydantic < 2.0.0 + model_ = plugin.dict() # type: ignore + # iterate over I/O to convert to dict + for io_name, io in model_["_io_keys"].items(): + if PYDANTIC_V2: + model_["_io_keys"][io_name] = json.loads(io.model_dump_json()) + # overwrite val if enum + if io.type.value == "enum": + model_["_io_keys"][io_name]["value"] = io.value.name # str + elif io["type"] == "enum": # pydantic V1 + val_ = io["value"].name # mapDirectory.raw + model_["_io_keys"][io_name]["value"] = val_.split(".")[-1] # raw + for 
inp in model_["inputs"]: + inp["value"] = None + model_["class"] = class_ + return model_ + + +class Plugin(WIPPPluginManifest, BasePlugin): + """WIPP Plugin Class. + + Contains methods to configure, run, and save plugins. + + Attributes: + versions: A list of local available versions for this plugin. + + Methods: + save_manifest(path): save plugin manifest to specified path + """ + + id: uuid.UUID # noqa: A003 + if PYDANTIC_V2: + model_config = ConfigDict(extra="allow", frozen=True) + else: + + class Config: # pylint: disable=R0903 + """Config.""" + + extra = "allow" + allow_mutation = False + + def __init__(self, _uuid: bool = True, **data: dict) -> None: + """Init a plugin object from manifest.""" + if _uuid: + data["id"] = uuid.uuid4() # type: ignore + else: + data["id"] = uuid.UUID(str(data["id"])) # type: ignore + + if not PYDANTIC_V2: # pydantic V1 + data["version"] = cast_version(data["version"]) + + super().__init__(**data) + + if not PYDANTIC_V2: # pydantic V1 + self.Config.allow_mutation = True + + self._io_keys = {i.name: i for i in self.inputs} + self._io_keys.update({o.name: o for o in self.outputs}) + + if not self.author: + warn_msg = ( + f"The plugin ({self.name}) is missing the author field. " + "This field is not required but should be filled in." 
+ ) + logger.warning(warn_msg) + + @property + def versions(self) -> list: # cannot be in PluginMethods because PLUGINS lives here + """Return list of local versions of a Plugin.""" + return list(PLUGINS[name_cleaner(self.name)]) + + def to_compute( + self, + hardware_requirements: Optional[dict] = None, + ) -> type[ComputeSchema]: + """Convert WIPP Plugin object to Compute Plugin object.""" + data = deepcopy(self.manifest) + return ComputePlugin( + hardware_requirements=hardware_requirements, + _from_old=True, + **data, + ) + + def save_manifest( + self, + path: Union[str, Path], + hardware_requirements: Optional[dict] = None, + compute: bool = False, + ) -> None: + """Save plugin manifest to specified path.""" + if compute: + with Path(path).open("w", encoding="utf-8") as file: + self.to_compute( + hardware_requirements=hardware_requirements, + ).save_manifest(path) + else: + with Path(path).open("w", encoding="utf-8") as file: + dict_ = self.manifest + json.dump( + dict_, + file, + indent=4, + ) + + logger.debug(f"Saved manifest to {path}") + + def __setattr__(self, name: str, value: Any) -> None: # noqa: ANN401 + """Set I/O parameters as attributes.""" + BasePlugin.__setattr__(self, name, value) + + def save_config(self, path: Union[str, Path]) -> None: + """Save manifest with configured I/O parameters to specified path.""" + with Path(path).open("w", encoding="utf-8") as file: + json.dump(_get_config(self, "WIPP"), file, indent=4, default=str) + logger.debug(f"Saved config to {path}") + + def __repr__(self) -> str: + """Print plugin name and version.""" + return BasePlugin.__repr__(self) + + +class ComputePlugin(ComputeSchema, BasePlugin): + """Compute Plugin Class. + + Contains methods to configure, run, and save plugins. + + Attributes: + versions: A list of local available versions for this plugin. 
+ + Methods: + save_manifest(path): save plugin manifest to specified path + """ + + if PYDANTIC_V2: + model_config = ConfigDict(extra="allow", frozen=True) + else: # pydantic V1 + + class Config: # pylint: disable=R0903 + """Config.""" + + extra = "allow" + allow_mutation = False + + def __init__( + self, + hardware_requirements: Optional[dict] = None, + _from_old: bool = False, + _uuid: bool = True, + **data: dict, + ) -> None: + """Init a plugin object from manifest.""" + if _uuid: + data["id"] = uuid.uuid4() # type: ignore + else: + data["id"] = uuid.UUID(str(data["id"])) # type: ignore + + if _from_old: + + def _convert_input(dict_: dict) -> dict: + dict_["type"] = _in_old_to_new(dict_["type"]) + return dict_ + + def _convert_output(dict_: dict) -> dict: + dict_["type"] = "path" + return dict_ + + def _ui_in(dict_: dict) -> PluginUIInput: # assuming old all ui input + # assuming format inputs. ___ + inp = dict_["key"].split(".")[-1] # e.g inpDir + try: + type_ = [x["type"] for x in data["inputs"] if x["name"] == inp][ + 0 + ] # get type from i/o + except IndexError: + type_ = "string" # default to string + except BaseException as exc: + raise exc + + dict_["type"] = _ui_old_to_new(type_) + return PluginUIInput(**dict_) + + def _ui_out(dict_: dict) -> PluginUIOutput: + new_dict_ = deepcopy(dict_) + new_dict_["name"] = "outputs." 
+ new_dict_["name"] + new_dict_["type"] = _ui_old_to_new(new_dict_["type"]) + return PluginUIOutput(**new_dict_) + + data["inputs"] = [_convert_input(x) for x in data["inputs"]] # type: ignore + data["outputs"] = [ + _convert_output(x) for x in data["outputs"] + ] # type: ignore + data["pluginHardwareRequirements"] = {} + data["ui"] = [_ui_in(x) for x in data["ui"]] # type: ignore + data["ui"].extend( # type: ignore[attr-defined] + [_ui_out(x) for x in data["outputs"]], + ) + + if hardware_requirements: + for k, v in hardware_requirements.items(): + data["pluginHardwareRequirements"][k] = v + + data["version"] = cast_version(data["version"]) + super().__init__(**data) + self.Config.allow_mutation = True + self._io_keys = {i.name: i for i in self.inputs} + self._io_keys.update({o.name: o for o in self.outputs}) # type: ignore + + if not self.author: + warn_msg = ( + f"The plugin ({self.name}) is missing the author field. " + "This field is not required but should be filled in." + ) + logger.warning(warn_msg) + + @property + def versions(self) -> list: # cannot be in PluginMethods because PLUGINS lives here + """Return list of local versions of a Plugin.""" + return list(PLUGINS[name_cleaner(self.name)]) + + def __setattr__(self, name: str, value: Any) -> None: # noqa: ANN401 + """Set I/O parameters as attributes.""" + BasePlugin.__setattr__(self, name, value) + + def save_config(self, path: Union[str, Path]) -> None: + """Save configured manifest with I/O parameters to specified path.""" + with Path(path).open("w", encoding="utf-8") as file: + json.dump(_get_config(self, "Compute"), file, indent=4, default=str) + logger.debug(f"Saved config to {path}") + + def save_manifest(self, path: Union[str, Path]) -> None: + """Save plugin manifest to specified path.""" + with Path(path).open("w", encoding="utf-8") as file: + json.dump(self.manifest, file, indent=4) + logger.debug(f"Saved manifest to {path}") + + def __repr__(self) -> str: + """Print plugin name and 
version.""" + return BasePlugin.__repr__(self) + + +def _load_plugin( + manifest: Union[str, dict, Path], +) -> Union[Plugin, ComputePlugin]: + """Parse a manifest and return one of Plugin or ComputePlugin.""" + manifest = _load_manifest(manifest) + if "pluginHardwareRequirements" in manifest: # type: ignore[operator] + # Parse the manifest + plugin = ComputePlugin(**manifest) # type: ignore[arg-type] + else: + # Parse the manifest + plugin = Plugin(**manifest) # type: ignore[arg-type] + return plugin + + +def submit_plugin( + manifest: Union[str, dict, Path], +) -> Union[Plugin, ComputePlugin]: + """Parse a plugin and create a local copy of it. + + This function accepts a plugin manifest as a string, a dictionary (parsed + json), or a pathlib.Path object pointed at a plugin manifest. + + Args: + manifest: + A plugin manifest. It can be a url, a dictionary, + a path to a JSON file or a string that can be parsed as a dictionary + + Returns: + A Plugin object populated with information from the plugin manifest. 
+ """ + plugin = validate_manifest(manifest) + plugin_name = name_cleaner(plugin.name) + + # Get Major/Minor/Patch versions + out_name = ( + plugin_name + + f"_M{plugin.version.major}m{plugin.version.minor}p{plugin.version.patch}.json" + ) + + # Save the manifest if it doesn't already exist in the database + organization = plugin.containerId.split("/")[0] + org_path = _PLUGIN_DIR.joinpath(organization.lower()) + org_path.mkdir(exist_ok=True, parents=True) + if not org_path.joinpath(out_name).exists(): + with org_path.joinpath(out_name).open("w", encoding="utf-8") as file: + if not PYDANTIC_V2: # pydantic V1 + manifest_ = plugin.dict() # type: ignore + manifest_["version"] = manifest_["version"]["version"] + else: # PYDANTIC V2 + manifest_ = json.loads(plugin.model_dump_json()) + json.dump(manifest_, file, indent=4) + + # Refresh plugins list + refresh() + return plugin + + +def get_plugin( + name: str, + version: Optional[str] = None, +) -> Union[Plugin, ComputePlugin]: + """Get a plugin with option to specify version. + + Return a plugin object with the option to specify a version. + The specified version's manifest must exist in manifests folder. + + Args: + name: Name of the plugin. + version: Optional version of the plugin, must follow semver. 
def load_config(config: Union[dict, Path, str]) -> Union[Plugin, ComputePlugin]:
    """Load configured plugin from config file/dict.

    Accepts either an already-parsed config dict or a path to a JSON config
    file, instantiates the matching plugin class, and restores every I/O
    value that was set when the config was saved.
    """
    if isinstance(config, (Path, str)):
        with Path(config).open("r", encoding="utf-8") as fh:
            data = json.load(fh)
    elif isinstance(config, dict):
        data = config
    else:
        msg = "config must be a dict, str, or a path"
        raise TypeError(msg)

    io_keys = data["_io_keys"]
    class_name = data["class"]
    data.pop("class", None)

    # Dispatch on the recorded class instead of an if/elif chain.
    constructors = {"Compute": ComputePlugin, "WIPP": Plugin}
    if class_name not in constructors:
        msg = "Invalid value of class"
        raise ValueError(msg)
    plugin_ = constructors[class_name](_uuid=False, **data)

    # Restore only the I/O values that were actually set.
    for key, entry in io_keys.items():
        val = entry["value"]
        if val is not None:
            setattr(plugin_, key, val)
    return plugin_
__pycache__ + logger.warning("Removing all plugins from local database") + for org in organizations: + shutil.rmtree(org) + refresh() diff --git a/src/polus/tabular/_plugins/cwl/__init__.py b/src/polus/tabular/_plugins/cwl/__init__.py new file mode 100644 index 0000000..966ef2d --- /dev/null +++ b/src/polus/tabular/_plugins/cwl/__init__.py @@ -0,0 +1,3 @@ +from .cwl import CWL_BASE_DICT + +__all__ = ["CWL_BASE_DICT"] diff --git a/src/polus/tabular/_plugins/cwl/base.cwl b/src/polus/tabular/_plugins/cwl/base.cwl new file mode 100644 index 0000000..7a86922 --- /dev/null +++ b/src/polus/tabular/_plugins/cwl/base.cwl @@ -0,0 +1,17 @@ +#!/usr/bin/env cwl-runner + +cwlVersion: v1.2 +class: CommandLineTool + +requirements: + DockerRequirement: + dockerPull: + InitialWorkDirRequirement: + listing: + - writable: true + entry: $(inputs.outDir) + InlineJavascriptRequirement: {} + +inputs: + +outputs: diff --git a/src/polus/tabular/_plugins/cwl/cwl.py b/src/polus/tabular/_plugins/cwl/cwl.py new file mode 100644 index 0000000..59a1163 --- /dev/null +++ b/src/polus/tabular/_plugins/cwl/cwl.py @@ -0,0 +1,7 @@ +from pathlib import Path + +import yaml # type: ignore + +PATH = Path(__file__) +with open(PATH.with_name("base.cwl"), "rb") as cwl_file: + CWL_BASE_DICT = yaml.full_load(cwl_file) diff --git a/src/polus/tabular/_plugins/gh.py b/src/polus/tabular/_plugins/gh.py new file mode 100644 index 0000000..791e0a7 --- /dev/null +++ b/src/polus/tabular/_plugins/gh.py @@ -0,0 +1,65 @@ +"""GitHub utilties.""" +import logging +import os +from urllib.parse import urljoin + +import github + +from polus.tabular._plugins.classes import submit_plugin + +logger = logging.getLogger("polus.tabular") + +""" +Initialize the Github interface +""" + + +def _init_github(auth=None): + if auth is None: + # Try to get an auth key from an environment variable + auth = os.environ.get("GITHUB_AUTH", None) + + if auth is None: + gh = github.Github() + logger.warning("Initialized Github connection with no 
user token.") + return gh + else: + logger.debug("Found auth token in GITHUB_AUTH environment variable.") + + else: + logger.debug("Github auth token supplied as input.") + + gh = github.Github(login_or_token=auth) + logger.debug( + f"Initialized Github connection with token for user: {gh.get_user().login}" + ) + + return gh + + +def add_plugin_from_gh( + user: str, + branch: str, + plugin: str, + repo: str = "polus-plugins", + manifest_name: str = "plugin.json", +): + """Add plugin from GitHub. + + This function adds a plugin hosted on GitHub and returns a Plugin object. + + Args: + user: GitHub username + branch: GitHub branch + plugin: Plugin's name + repo: Name of GitHub repository, default is `polus-plugins` + manifest_name: Name of manifest file, default is `plugin.json` + + Returns: + A Plugin object populated with information from the plugin manifest. + """ + l1 = [user, repo, branch, plugin, manifest_name] + u = "/".join(l1) + url = urljoin("https://raw.githubusercontent.com", u) + logger.info("Adding %s" % url) + return submit_plugin(url, refresh=True) diff --git a/src/polus/tabular/_plugins/io/__init__.py b/src/polus/tabular/_plugins/io/__init__.py new file mode 100644 index 0000000..0687084 --- /dev/null +++ b/src/polus/tabular/_plugins/io/__init__.py @@ -0,0 +1,21 @@ +"""Init IO module.""" + +from polus.tabular._plugins.io._io import Input +from polus.tabular._plugins.io._io import IOBase +from polus.tabular._plugins.io._io import Output +from polus.tabular._plugins.io._io import Version +from polus.tabular._plugins.io._io import input_to_cwl +from polus.tabular._plugins.io._io import io_to_yml +from polus.tabular._plugins.io._io import output_to_cwl +from polus.tabular._plugins.io._io import outputs_cwl + +__all__ = [ + "Input", + "Output", + "IOBase", + "Version", + "io_to_yml", + "outputs_cwl", + "input_to_cwl", + "output_to_cwl", +] diff --git a/src/polus/tabular/_plugins/io/_io.py b/src/polus/tabular/_plugins/io/_io.py new file mode 100644 index 
class InputTypes(str, enum.Enum):  # wipp schema
    """Enum of Input Types for WIPP schema.

    Values mirror the keys of ``WIPP_TYPES``, which maps each type name to
    the Python type used for validation (most are directory-like and map to
    ``pathlib.Path``).
    """

    COLLECTION = "collection"
    PYRAMID = "pyramid"
    CSVCOLLECTION = "csvCollection"
    GENERICDATA = "genericData"
    STITCHINGVECTOR = "stitchingVector"
    NOTEBOOK = "notebook"
    TENSORFLOWMODEL = "tensorflowModel"
    TENSORBOARDLOGS = "tensorboardLogs"
    PYRAMIDANNOTATION = "pyramidAnnotation"
    INTEGER = "integer"
    NUMBER = "number"
    STRING = "string"
    BOOLEAN = "boolean"
    ARRAY = "array"
    ENUM = "enum"
Output Types for WIPP schema.""" + + COLLECTION = "collection" + PYRAMID = "pyramid" + CSVCOLLECTION = "csvCollection" + GENERICDATA = "genericData" + STITCHINGVECTOR = "stitchingVector" + NOTEBOOK = "notebook" + TENSORFLOWMODEL = "tensorflowModel" + TENSORBOARDLOGS = "tensorboardLogs" + PYRAMIDANNOTATION = "pyramidAnnotation" + + +def _in_old_to_new(old: str) -> str: # map wipp InputType to compute schema's InputType + """Map an InputType from wipp schema to one of compute schema.""" + d = {"integer": "number", "enum": "string"} + if old in ["string", "array", "number", "boolean"]: + return old + if old in d: + return d[old] # integer or enum + return "path" # everything else + + +def _ui_old_to_new(old: str) -> str: # map wipp InputType to compute schema's UIType + """Map an InputType from wipp schema to a UIType of compute schema.""" + type_dict = { + "string": "text", + "boolean": "checkbox", + "number": "number", + "array": "text", + "integer": "number", + } + if old in type_dict: + return type_dict[old] + return "text" + + +FileSystem = TypeVar("FileSystem", bound=fsspec.spec.AbstractFileSystem) + + +class IOBase(BaseModel): # pylint: disable=R0903 + """Base Class for I/O arguments.""" + + type: Any = None + options: Optional[dict] = None + value: Optional[Any] = None + id_: Optional[Any] = None + _fs: Optional[FileSystem] = PrivateAttr( + default=None, + ) # type checking is done at plugin level + + def _validate(self) -> None: # pylint: disable=R0912 + value = self.value + + if value is None: + if self.required: + msg = f""" + The input value ({self.name}) is required, + but the value was not set.""" + raise TypeError( + msg, + ) + + return + + if self.type == InputTypes.ENUM: + try: + if isinstance(value, str): + value = enum.Enum(self.name, self.options["values"])[value] + elif not isinstance(value, enum.Enum): + raise ValueError + + except KeyError: + logging.error( + f""" + Value ({value}) is not a valid value + for the enum input ({self.name}). 
+ Must be one of {self.options['values']}. + """, + ) + raise + else: + if isinstance(self.type, (InputTypes, OutputTypes)): # wipp + value = WIPP_TYPES[self.type](value) + else: + value = WIPP_TYPES[self.type.value]( + value, + ) # compute, type does not inherit from str + + if isinstance(value, pathlib.Path): + value = value.absolute() + if self._fs: + assert self._fs.exists( + str(value), + ), f"{value} is invalid or does not exist" + assert self._fs.isdir( + str(value), + ), f"{value} is not a valid directory" + else: + assert value.exists(), f"{value} is invalid or does not exist" + assert value.is_dir(), f"{value} is not a valid directory" + + super().__setattr__("value", value) + + def __setattr__(self, name: str, value: Any) -> None: # ruff: noqa: ANN401 + """Set I/O attributes.""" + if name not in ["value", "id", "_fs"]: + # Don't permit any other values to be changed + msg = f"Cannot set property: {name}" + raise TypeError(msg) + + super().__setattr__(name, value) + + if name == "value": + self._validate() + + +class Output(IOBase): # pylint: disable=R0903 + """Required until JSON schema is fixed.""" + + if PYDANTIC_V2: + name: Annotated[ + str, + StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), + ] = Field( + ..., + examples=["outputCollection"], + title="Output name", + ) + description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( + ..., + examples=["Output collection"], + title="Output description", + ) + else: + name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( + ..., + examples=["outputCollection"], + title="Output name", + ) + description: constr(regex=r"^(.*)$") = Field( + ..., + examples=["Output collection"], + title="Output description", + ) + type: OutputTypes = Field( + ..., + examples=["stitchingVector", "collection"], + title="Output type", + ) + + +class Input(IOBase): # pylint: disable=R0903 + """Required until JSON schema is fixed.""" + + if PYDANTIC_V2: + name: Annotated[ + str, + 
StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), + ] = Field( + ..., + description="Input name as expected by the plugin CLI", + examples=["inputImages", "fileNamePattern", "thresholdValue"], + title="Input name", + ) + description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( + ..., + examples=["Input Images"], + title="Input description", + ) + else: + name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( + ..., + description="Input name as expected by the plugin CLI", + examples=["inputImages", "fileNamePattern", "thresholdValue"], + title="Input name", + ) + description: constr(regex=r"^(.*)$") = Field( + ..., + examples=["Input Images"], + title="Input description", + ) + type: InputTypes + required: Optional[bool] = Field( + True, + description="Whether an input is required or not", + examples=[True], + title="Required input", + ) + + def __init__(self, **data) -> None: # ruff: noqa: ANN003 + """Initialize input.""" + super().__init__(**data) + + if self.description is None: + logger.warning( + f""" + The input ({self.name}) is missing the description field. + This field is not required but should be filled in. + """, + ) + + +def _check_version_number(value: Union[str, int]) -> bool: + if isinstance(value, int): + value = str(value) + if "-" in value: + value = value.split("-")[0] + if len(value) > 1 and value[0] == "0": + return False + return bool(re.match(r"^\d+$", value)) + + +if PYDANTIC_V2: + + class Version(RootModel): + """SemVer object.""" + + root: str + + @field_validator("root") + @classmethod + def semantic_version( + cls, + value, + ) -> Any: # ruff: noqa: ANN202, N805, ANN001 + """Pydantic Validator to check semver.""" + version = value.split(".") + + assert ( + len(version) == 3 # ruff: noqa: PLR2004 + ), f""" + Invalid version ({value}). 
Version must follow + semantic versioning (see semver.org)""" + if "-" in version[-1]: # with hyphen + idn = version[-1].split("-")[-1] + id_reg = re.compile("[0-9A-Za-z-]+") + assert bool( + id_reg.match(idn), + ), f"""Invalid version ({value}). + Version must follow semantic versioning (see semver.org)""" + + assert all( + map(_check_version_number, version), + ), f"""Invalid version ({value}). + Version must follow semantic versioning (see semver.org)""" + return value + + @property + def major(self): + """Return x from x.y.z .""" + return int(self.root.split(".")[0]) + + @property + def minor(self): + """Return y from x.y.z .""" + return int(self.root.split(".")[1]) + + @property + def patch(self): + """Return z from x.y.z .""" + if not self.root.split(".")[2].isdigit(): + msg = "Patch version is not a digit, comparison may not be accurate." + logger.warning(msg) + return self.root.split(".")[2] + return int(self.root.split(".")[2]) + + def __str__(self) -> str: + """Return string representation of Version object.""" + return self.root + + @singledispatchmethod + def __lt__(self, other: Any) -> bool: + """Compare if Version is less than other object.""" + msg = "invalid type for comparison." + raise TypeError(msg) + + @singledispatchmethod + def __gt__(self, other: Any) -> bool: + """Compare if Version is less than other object.""" + msg = "invalid type for comparison." + raise TypeError(msg) + + @singledispatchmethod + def __eq__(self, other: Any) -> bool: + """Compare if two Version objects are equal.""" + msg = "invalid type for comparison." 
+ raise TypeError(msg) + + def __hash__(self) -> int: + """Needed to use Version objects as dict keys.""" + return hash(self.root) + + def __repr__(self) -> str: + """Return string representation of Version object.""" + return self.root + + @Version.__eq__.register(str) # pylint: disable=no-member + def _(self, other): + return self == Version(other) + + @Version.__lt__.register(str) # pylint: disable=no-member + def _(self, other): + v = Version(other) + return self < v + + @Version.__gt__.register(str) # pylint: disable=no-member + def _(self, other): + v = Version(other) + return self > v + +else: # PYDANTIC_V1 + + class Version(BaseModel): + """SemVer object.""" + + version: str + + def __init__(self, version: str) -> None: + """Initialize Version object.""" + super().__init__(version=version) + + @validator("version") + def semantic_version( + cls, + value, + ): # ruff: noqa: ANN202, N805, ANN001 + """Pydantic Validator to check semver.""" + version = value.split(".") + + assert ( + len(version) == 3 # ruff: noqa: PLR2004 + ), f""" + Invalid version ({value}). Version must follow + semantic versioning (see semver.org)""" + if "-" in version[-1]: # with hyphen + idn = version[-1].split("-")[-1] + id_reg = re.compile("[0-9A-Za-z-]+") + assert bool( + id_reg.match(idn), + ), f"""Invalid version ({value}). + Version must follow semantic versioning (see semver.org)""" + + assert all( + map(_check_version_number, version), + ), f"""Invalid version ({value}). + Version must follow semantic versioning (see semver.org)""" + return value + + @property + def major(self): + """Return x from x.y.z .""" + return int(self.version.split(".")[0]) + + @property + def minor(self): + """Return y from x.y.z .""" + return int(self.version.split(".")[1]) + + @property + def patch(self): + """Return z from x.y.z .""" + if not self.version.split(".")[2].isdigit(): + msg = "Patch version is not a digit, comparison may not be accurate." 
+ logger.warning(msg) + return self.version.split(".")[2] + return int(self.version.split(".")[2]) + + def __str__(self) -> str: + """Return string representation of Version object.""" + return self.version + + @singledispatchmethod + def __lt__(self, other: Any) -> bool: + """Compare if Version is less than other object.""" + msg = "invalid type for comparison." + raise TypeError(msg) + + @singledispatchmethod + def __gt__(self, other: Any) -> bool: + """Compare if Version is less than other object.""" + msg = "invalid type for comparison." + raise TypeError(msg) + + @singledispatchmethod + def __eq__(self, other: Any) -> bool: + """Compare if two Version objects are equal.""" + msg = "invalid type for comparison." + raise TypeError(msg) + + def __hash__(self) -> int: + """Needed to use Version objects as dict keys.""" + return hash(self.version) + + @Version.__eq__.register(str) # pylint: disable=no-member + def _(self, other): + return self == Version(**{"version": other}) + + @Version.__lt__.register(str) # pylint: disable=no-member + def _(self, other): + v = Version(**{"version": other}) + return self < v + + @Version.__gt__.register(str) # pylint: disable=no-member + def _(self, other): + v = Version(**{"version": other}) + return self > v + + +@Version.__eq__.register(Version) # pylint: disable=no-member +def _(self, other): + return ( + other.major == self.major + and other.minor == self.minor + and other.patch == self.patch + ) + + +@Version.__lt__.register(Version) # pylint: disable=no-member +def _(self, other): + if other.major > self.major: + return True + if other.major == self.major: + if other.minor > self.minor: + return True + if other.minor == self.minor: + if other.patch > self.patch: + return True + return False + return False + return False + + +@Version.__gt__.register(Version) # pylint: disable=no-member +def _(self, other): + return other < self + + +class DuplicateVersionFoundError(Exception): + """Raise when two equal versions found.""" 
+ + +CWL_INPUT_TYPES = { + "path": "Directory", # always Dir? Yes + "string": "string", + "number": "double", + "boolean": "boolean", + "genericData": "Directory", + "collection": "Directory", + "enum": "string", # for compatibility with workflows + "stitchingVector": "Directory", + "integer": "long", + # not yet implemented: array +} + + +def _type_in(inp: Input): + """Return appropriate value for `type` based on input type.""" + val = inp.type.value + req = "" if inp.required else "?" + + # NOT compatible with CWL workflows, ok in CLT + # if val == "enum": + # if input.required: + + # if val in CWL_INPUT_TYPES: + return CWL_INPUT_TYPES[val] + req if val in CWL_INPUT_TYPES else "string" + req + + +def input_to_cwl(inp: Input): + """Return dict of inputs for cwl.""" + return { + f"{inp.name}": { + "type": _type_in(inp), + "inputBinding": {"prefix": f"--{inp.name}"}, + }, + } + + +def output_to_cwl(out: Output): + """Return dict of output args for cwl for input section.""" + return { + f"{out.name}": { + "type": "Directory", + "inputBinding": {"prefix": f"--{out.name}"}, + }, + } + + +def outputs_cwl(out: Output): + """Return dict of output for `outputs` in cwl.""" + return { + f"{out.name}": { + "type": "Directory", + "outputBinding": {"glob": f"$(inputs.{out.name}.basename)"}, + }, + } + + +# -- I/O as arguments in .yml + + +@singledispatch +def _io_value_to_yml(io) -> Union[str, dict]: + return str(io) + + +@_io_value_to_yml.register +def _(io: pathlib.Path): + return {"class": "Directory", "location": str(io)} + + +@_io_value_to_yml.register +def _(io: enum.Enum): + return io.name + + +def io_to_yml(io): + """Return IO entry for yml file.""" + return _io_value_to_yml(io.value) diff --git a/src/polus/tabular/_plugins/manifests/__init__.py b/src/polus/tabular/_plugins/manifests/__init__.py new file mode 100644 index 0000000..5854298 --- /dev/null +++ b/src/polus/tabular/_plugins/manifests/__init__.py @@ -0,0 +1,15 @@ +"""Initialize manifests module.""" + +from 
polus.tabular._plugins.manifests.manifest_utils import InvalidManifestError +from polus.tabular._plugins.manifests.manifest_utils import _error_log +from polus.tabular._plugins.manifests.manifest_utils import _load_manifest +from polus.tabular._plugins.manifests.manifest_utils import _scrape_manifests +from polus.tabular._plugins.manifests.manifest_utils import validate_manifest + +__all__ = [ + "InvalidManifestError", + "_load_manifest", + "validate_manifest", + "_error_log", + "_scrape_manifests", +] diff --git a/src/polus/tabular/_plugins/manifests/manifest_utils.py b/src/polus/tabular/_plugins/manifests/manifest_utils.py new file mode 100644 index 0000000..6a5c5f8 --- /dev/null +++ b/src/polus/tabular/_plugins/manifests/manifest_utils.py @@ -0,0 +1,210 @@ +"""Utilities for manifest parsing and validation.""" +import json +import logging +import pathlib +from typing import Optional +from typing import Union + +import github +import requests +import validators +from polus.tabular._plugins._compat import PYDANTIC_V2 +from polus.tabular._plugins.models import ComputeSchema +from polus.tabular._plugins.models import WIPPPluginManifest +from pydantic import ValidationError +from pydantic import errors +from tqdm import tqdm + +if not PYDANTIC_V2: + from polus.tabular._plugins.utils import cast_version + +logger = logging.getLogger("polus.tabular") + +# Fields that must be in a plugin manifest +REQUIRED_FIELDS = [ + "name", + "version", + "description", + "author", + "containerId", + "inputs", + "outputs", + "ui", +] + + +class InvalidManifestError(Exception): + """Raised when manifest has validation errors.""" + + +def is_valid_manifest(plugin: dict) -> bool: + """Validate basic attributes of a plugin manifest. 
+ + Args: + plugin: A parsed plugin json file + + Returns: + True if the plugin has the minimal json fields + """ + fields = list(plugin.keys()) + + for field in REQUIRED_FIELDS: + if field not in fields: + msg = f"Missing json field, {field}, in plugin manifest." + logger.error(msg) + return False + return True + + +def _load_manifest(manifest: Union[str, dict, pathlib.Path]) -> dict: + """Return manifest as dict from str (url or path) or pathlib.Path.""" + if isinstance(manifest, dict): # is dict + return manifest + if isinstance(manifest, pathlib.Path): # is path + if manifest.suffix != ".json": + msg = "plugin manifest must be a json file with .json extension." + raise ValueError(msg) + + with manifest.open("r", encoding="utf-8") as manifest_json: + manifest_ = json.load(manifest_json) + elif isinstance(manifest, str): # is str + if validators.url(manifest): # is url + manifest_ = requests.get(manifest, timeout=10).json() + else: # could (and should) be path + try: + manifest_ = _load_manifest(pathlib.Path(manifest)) + except Exception as exc: # was not a Path? 
# noqa + msg = "invalid manifest" + raise ValueError(msg) from exc + else: # is not str, dict, or path + msg = f"invalid manifest type {type(manifest)}" + raise ValueError(msg) + return manifest_ + + +def validate_manifest( + manifest: Union[str, dict, pathlib.Path], +) -> Union[WIPPPluginManifest, ComputeSchema]: + """Validate a plugin manifest against schema.""" + manifest = _load_manifest(manifest) + if not PYDANTIC_V2: # Pydantic V1 + manifest["version"] = cast_version( + manifest["version"], + ) # cast version to semver object + if "name" in manifest: + name = manifest["name"] + else: + msg = f"{manifest} has no value for name" + raise InvalidManifestError(msg) + + if "pluginHardwareRequirements" in manifest: + # Parse the manifest + try: + plugin = ComputeSchema(**manifest) + except ValidationError as e: + msg = f"{name} does not conform to schema" + raise InvalidManifestError(msg) from e + except BaseException as e: + raise e + else: + # Parse the manifest + try: + plugin = WIPPPluginManifest(**manifest) + except ValidationError as e: + msg = f"{manifest['name']} does not conform to schema" + raise InvalidManifestError( + msg, + ) from e + except BaseException as e: + raise e + return plugin + + +def _scrape_manifests( + repo: Union[str, github.Repository.Repository], # type: ignore + gh: github.Github, + min_depth: int = 1, + max_depth: Optional[int] = None, + return_invalid: bool = False, +) -> Union[list, tuple[list, list]]: + if max_depth is None: + max_depth = min_depth + min_depth = 0 + + if not max_depth >= min_depth: + msg = "max_depth is smaller than min_depth" + raise ValueError(msg) + + if isinstance(repo, str): + repo = gh.get_repo(repo) + + contents = list(repo.get_contents("")) # type: ignore + next_contents: list = [] + valid_manifests: list = [] + invalid_manifests: list = [] + + for d in range(0, max_depth): + for content in tqdm(contents, desc=f"{repo.full_name}: {d}"): + if content.type == "dir": + 
next_contents.extend(repo.get_contents(content.path)) # type: ignore + elif content.name.endswith(".json") and d >= min_depth: + manifest = json.loads(content.decoded_content) + if is_valid_manifest(manifest): + valid_manifests.append(manifest) + else: + invalid_manifests.append(manifest) + + contents = next_contents.copy() + next_contents = [] + + if return_invalid: + return valid_manifests, invalid_manifests + return valid_manifests + + +def _error_log(val_err: ValidationError, manifest: dict, fct: str) -> None: + report = [] + + for error in val_err.args[0]: + if isinstance(error, list): + error = error[0] # noqa + + if isinstance(error, AssertionError): + msg = ( + f"The plugin ({manifest['name']}) " + "failed an assertion check: {err.args[0]}" + ) + report.append(msg) + logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 + elif isinstance(error.exc, errors.MissingError): + msg = ( + f"The plugin ({manifest['name']}) " + "is missing fields: {err.loc_tuple()}" + ) + report.append(msg) + logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 + elif errors.ExtraError: + if error.loc_tuple()[0] in ["inputs", "outputs", "ui"]: + manifest_ = manifest[error.loc_tuple()[0]][error.loc_tuple()[1]]["name"] + msg = ( + f"The plugin ({manifest['name']}) " + "had unexpected values in the " + f"{error.loc_tuple()[0]} " + f"({manifest_}): " + f"{error.exc.args[0][0].loc_tuple()}" + ) + report.append(msg) + else: + msg = ( + f"The plugin ({manifest['name']}) " + "had an error: {err.exc.args[0][0]}" + ) + report.append(msg) + logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 + else: + str_val_err = str(val_err).replace("\n", ", ").replace(" ", " ") + msg = ( + f"{fct}: Uncaught manifest error in ({manifest['name']}): " + f"{str_val_err}" + ) + logger.warning(msg) diff --git a/src/polus/tabular/_plugins/models/PolusComputeSchema.json b/src/polus/tabular/_plugins/models/PolusComputeSchema.json new file mode 100644 index 0000000..d4875d5 --- 
/dev/null +++ b/src/polus/tabular/_plugins/models/PolusComputeSchema.json @@ -0,0 +1,499 @@ +{ + "definitions": { + "PluginInputType": { + "title": "PluginInputType", + "description": "An enumeration.", + "enum": [ + "path", + "string", + "number", + "array", + "boolean" + ] + }, + "PluginInput": { + "title": "PluginInput", + "type": "object", + "properties": { + "format": { + "title": "Format", + "type": "string" + }, + "label": { + "title": "Label", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "required": { + "title": "Required", + "type": "boolean" + }, + "type": { + "$ref": "#/definitions/PluginInputType" + }, + "default": { + "title": "Default", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] + } + }, + "required": [ + "name", + "required", + "type" + ] + }, + "PluginOutputType": { + "title": "PluginOutputType", + "description": "An enumeration.", + "enum": [ + "path" + ], + "type": "string" + }, + "PluginOutput": { + "title": "PluginOutput", + "type": "object", + "properties": { + "format": { + "title": "Format", + "type": "string" + }, + "label": { + "title": "Label", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "type": { + "$ref": "#/definitions/PluginOutputType" + } + }, + "required": [ + "name", + "type" + ] + }, + "GpuVendor": { + "title": "GpuVendor", + "description": "An enumeration.", + "enum": [ + "none", + "amd", + "tpu", + "nvidia" + ], + "type": "string" + }, + "PluginHardwareRequirements": { + "title": "PluginHardwareRequirements", + "type": "object", + "properties": { + "coresMax": { + "title": "Coresmax", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "coresMin": { + "title": "Coresmin", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "cpuAVX": { + "title": "Cpuavx", + "type": "boolean" + }, + "cpuAVX2": { + "title": "Cpuavx2", + "type": "boolean" + }, + 
"cpuMin": { + "title": "Cpumin", + "type": "string" + }, + "gpu": { + "$ref": "#/definitions/GpuVendor" + }, + "gpuCount": { + "title": "Gpucount", + "type": "number" + }, + "gpuDriverVersion": { + "title": "Gpudriverversion", + "type": "string" + }, + "gpuType": { + "title": "Gputype", + "type": "string" + }, + "outDirMax": { + "title": "Outdirmax", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "outDirMin": { + "title": "Outdirmin", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "ramMax": { + "title": "Rammax", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "ramMin": { + "title": "Rammin", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "tmpDirMax": { + "title": "Tmpdirmax", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + }, + "tmpDirMin": { + "title": "Tmpdirmin", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + } + ] + } + } + }, + "ThenEntry": { + "title": "ThenEntry", + "type": "object", + "properties": { + "action": { + "title": "Action", + "type": "string" + }, + "input": { + "title": "Input", + "type": "string" + }, + "value": { + "title": "Value", + "type": "string" + } + }, + "required": [ + "action", + "input", + "value" + ] + }, + "ConditionEntry": { + "title": "ConditionEntry", + "type": "object", + "properties": { + "expression": { + "title": "Expression", + "type": "string" + } + }, + "required": [ + "expression" + ] + }, + "Validator": { + "title": "Validator", + "type": "object", + "properties": { + "then": { + "title": "Then", + "type": "array", + "items": { + "$ref": "#/definitions/ThenEntry" + } + }, + "validator": { + "title": "Validator", + "type": "array", + "items": { + "$ref": "#/definitions/ConditionEntry" + } + } + } + }, + "PluginUIType": { + "title": "PluginUIType", + "description": "An enumeration.", + "enum": [ + "checkbox", + "color", + "date", + "email", + 
"number", + "password", + "radio", + "range", + "text", + "time" + ] + }, + "PluginUIInput": { + "title": "PluginUIInput", + "type": "object", + "properties": { + "bind": { + "title": "Bind", + "type": "string" + }, + "condition": { + "title": "Condition", + "anyOf": [ + { + "type": "array", + "items": { + "$ref": "#/definitions/Validator" + } + }, + { + "type": "string" + } + ] + }, + "default": { + "title": "Default", + "anyOf": [ + { + "type": "string" + }, + { + "type": "number" + }, + { + "type": "boolean" + } + ] + }, + "description": { + "title": "Description", + "type": "string" + }, + "fieldset": { + "title": "Fieldset", + "type": "array", + "items": { + "type": "string" + } + }, + "hidden": { + "title": "Hidden", + "type": "boolean" + }, + "key": { + "title": "Key", + "type": "string" + }, + "title": { + "title": "Title", + "type": "string" + }, + "type": { + "$ref": "#/definitions/PluginUIType" + } + }, + "required": [ + "key", + "title", + "type" + ] + }, + "PluginUIOutput": { + "title": "PluginUIOutput", + "type": "object", + "properties": { + "description": { + "title": "Description", + "type": "string" + }, + "format": { + "title": "Format", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "type": { + "$ref": "#/definitions/PluginUIType" + }, + "website": { + "title": "Website", + "type": "string" + } + }, + "required": [ + "description", + "name", + "type" + ] + }, + "PluginSchema": { + "title": "PluginSchema", + "type": "object", + "properties": { + "author": { + "title": "Author", + "type": "string" + }, + "baseCommand": { + "title": "Basecommand", + "type": "array", + "items": { + "type": "string" + } + }, + "citation": { + "title": "Citation", + "type": "string" + }, + "containerId": { + "title": "Containerid", + "type": "string" + }, + "customInputs": { + "title": "Custominputs", + "type": "boolean" + }, + "description": { + "title": "Description", + "type": "string" + }, + "inputs": { + "title": "Inputs", + 
"type": "array", + "items": { + "$ref": "#/definitions/PluginInput" + } + }, + "institution": { + "title": "Institution", + "type": "string" + }, + "name": { + "title": "Name", + "type": "string" + }, + "outputs": { + "title": "Outputs", + "type": "array", + "items": { + "$ref": "#/definitions/PluginOutput" + } + }, + "pluginHardwareRequirements": { + "$ref": "#/definitions/PluginHardwareRequirements" + }, + "repository": { + "title": "Repository", + "type": "string" + }, + "title": { + "title": "Title", + "type": "string" + }, + "ui": { + "title": "Ui", + "type": "array", + "items": { + "anyOf": [ + { + "$ref": "#/definitions/PluginUIInput" + }, + { + "$ref": "#/definitions/PluginUIOutput" + } + ] + } + }, + "version": { + "title": "Version", + "examples": [ + "0.1.0", + "0.1.0rc1" + ], + "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", + "type": "string" + }, + "website": { + "title": "Website", + "type": "string" + } + }, + "required": [ + "containerId", + "description", + "inputs", + "name", + "outputs", + "pluginHardwareRequirements", + "title", + "ui", + "version" + ] + } + } +} diff --git a/src/polus/tabular/_plugins/models/PolusComputeSchema.ts b/src/polus/tabular/_plugins/models/PolusComputeSchema.ts new file mode 100644 index 0000000..184ebbf --- /dev/null +++ b/src/polus/tabular/_plugins/models/PolusComputeSchema.ts @@ -0,0 +1,102 @@ +/* tslint:disable */ +/* eslint-disable */ +/** +/* This file was automatically generated from pydantic models by running pydantic2ts. 
+/* Do not modify it by hand - just update the pydantic models and then re-run the script +*/ + +export type GpuVendor = "none" | "amd" | "tpu" | "nvidia"; +export type PluginInputType = "path" | "string" | "number" | "array" | "boolean"; +export type PluginOutputType = "path"; +export type PluginUIType = + | "checkbox" + | "color" + | "date" + | "email" + | "number" + | "password" + | "radio" + | "range" + | "text" + | "time"; + +export interface ConditionEntry { + expression: string; +} +export interface Model {} +export interface PluginHardwareRequirements { + coresMax?: string | number; + coresMin?: string | number; + cpuAVX?: boolean; + cpuAVX2?: boolean; + cpuMin?: string; + gpu?: GpuVendor; + gpuCount?: number; + gpuDriverVersion?: string; + gpuType?: string; + outDirMax?: string | number; + outDirMin?: string | number; + ramMax?: string | number; + ramMin?: string | number; + tmpDirMax?: string | number; + tmpDirMin?: string | number; +} +export interface PluginInput { + format?: string; + label?: string; + name: string; + required: boolean; + type: PluginInputType; + default?: string | number | boolean; +} +export interface PluginOutput { + format?: string; + label?: string; + name: string; + type: PluginOutputType; +} +export interface PluginSchema { + author?: string; + baseCommand?: string[]; + citation?: string; + containerId: string; + customInputs?: boolean; + description: string; + inputs: PluginInput[]; + institution?: string; + name: string; + outputs: PluginOutput[]; + pluginHardwareRequirements: PluginHardwareRequirements; + repository?: string; + title: string; + ui: (PluginUIInput | PluginUIOutput)[]; + version: string; + website?: string; +} +export interface PluginUIInput { + bind?: string; + condition?: Validator[] | string; + default?: string | number | boolean; + description?: string; + fieldset?: string[]; + hidden?: boolean; + key: string; + title: string; + type: PluginUIType; +} +export interface Validator { + then?: ThenEntry[]; + 
validator?: ConditionEntry[]; +} +export interface ThenEntry { + action: string; + input: string; + value: string; +} +export interface PluginUIOutput { + description: string; + format?: string; + name: string; + type: PluginUIType; + website?: string; +} diff --git a/src/polus/tabular/_plugins/models/__init__.py b/src/polus/tabular/_plugins/models/__init__.py new file mode 100644 index 0000000..55f3558 --- /dev/null +++ b/src/polus/tabular/_plugins/models/__init__.py @@ -0,0 +1,35 @@ +"""Pydantic Models based on JSON schemas.""" + +import pydantic + +PYDANTIC_VERSION = pydantic.__version__ + +if PYDANTIC_VERSION.split(".")[0] == "1": + from polus.tabular._plugins.models.pydanticv1.compute import ( + PluginSchema as ComputeSchema, + ) + from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import ( + PluginUIInput, + ) + from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import ( + PluginUIOutput, + ) + from polus.tabular._plugins.models.pydanticv1.wipp import WIPPPluginManifest +elif PYDANTIC_VERSION.split(".")[0] == "2": + from polus.tabular._plugins.models.pydanticv2.compute import ( + PluginSchema as ComputeSchema, + ) + from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import ( + PluginUIInput, + ) + from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import ( + PluginUIOutput, + ) + from polus.tabular._plugins.models.pydanticv2.wipp import WIPPPluginManifest + +__all__ = [ + "WIPPPluginManifest", + "PluginUIInput", + "PluginUIOutput", + "ComputeSchema", +] diff --git a/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py b/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py new file mode 100644 index 0000000..a40b5b4 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py @@ -0,0 +1,137 @@ +# generated by datamodel-codegen: +# timestamp: 2022-09-21T03:41:58+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Any 
+ +from pydantic import BaseModel +from pydantic import Field +from pydantic import constr + + +class Model(BaseModel): + __root__: Any + + +class PluginInputType(Enum): + path = "path" + string = "string" + number = "number" + array = "array" + boolean = "boolean" + + +class PluginInput(BaseModel): + format: str | None = Field(None, title="Format") + label: str | None = Field(None, title="Label") + name: str = Field(..., title="Name") + required: bool = Field(..., title="Required") + type: PluginInputType + default: str | float | bool | None = Field(None, title="Default") + + +class PluginOutputType(Enum): + path = "path" + + +class PluginOutput(BaseModel): + format: str | None = Field(None, title="Format") + label: str | None = Field(None, title="Label") + name: str = Field(..., title="Name") + type: PluginOutputType + + +class GpuVendor(Enum): + none = "none" + amd = "amd" + tpu = "tpu" + nvidia = "nvidia" + + +class PluginHardwareRequirements(BaseModel): + coresMax: str | float | None = Field(None, title="Coresmax") + coresMin: str | float | None = Field(None, title="Coresmin") + cpuAVX: bool | None = Field(None, title="Cpuavx") + cpuAVX2: bool | None = Field(None, title="Cpuavx2") + cpuMin: str | None = Field(None, title="Cpumin") + gpu: GpuVendor | None = None + gpuCount: float | None = Field(None, title="Gpucount") + gpuDriverVersion: str | None = Field(None, title="Gpudriverversion") + gpuType: str | None = Field(None, title="Gputype") + outDirMax: str | float | None = Field(None, title="Outdirmax") + outDirMin: str | float | None = Field(None, title="Outdirmin") + ramMax: str | float | None = Field(None, title="Rammax") + ramMin: str | float | None = Field(None, title="Rammin") + tmpDirMax: str | float | None = Field(None, title="Tmpdirmax") + tmpDirMin: str | float | None = Field(None, title="Tmpdirmin") + + +class ThenEntry(BaseModel): + action: str = Field(..., title="Action") + input: str = Field(..., title="Input") + value: str = Field(..., 
title="Value") + + +class ConditionEntry(BaseModel): + expression: str = Field(..., title="Expression") + + +class Validator(BaseModel): + then: list[ThenEntry] | None = Field(None, title="Then") + validator: list[ConditionEntry] | None = Field(None, title="Validator") + + +class PluginUIType(Enum): + checkbox = "checkbox" + color = "color" + date = "date" + email = "email" + number = "number" + password = "password" + radio = "radio" + range = "range" + text = "text" + time = "time" + + +class PluginUIInput(BaseModel): + bind: str | None = Field(None, title="Bind") + condition: list[Validator] | str | None = Field(None, title="Condition") + default: str | float | bool | None = Field(None, title="Default") + description: str | None = Field(None, title="Description") + fieldset: list[str] | None = Field(None, title="Fieldset") + hidden: bool | None = Field(None, title="Hidden") + key: str = Field(..., title="Key") + title: str = Field(..., title="Title") + type: PluginUIType + + +class PluginUIOutput(BaseModel): + description: str = Field(..., title="Description") + format: str | None = Field(None, title="Format") + name: str = Field(..., title="Name") + type: PluginUIType + website: str | None = Field(None, title="Website") + + +class PluginSchema(BaseModel): + author: str | None = Field(None, title="Author") + baseCommand: list[str] | None = Field(None, title="Basecommand") + citation: str | None = Field(None, title="Citation") + containerId: str = Field(..., title="Containerid") + customInputs: bool | None = Field(None, title="Custominputs") + description: str = Field(..., title="Description") + inputs: list[PluginInput] = Field(..., title="Inputs") + institution: str | None = Field(None, title="Institution") + name: str = Field(..., title="Name") + outputs: list[PluginOutput] = Field(..., title="Outputs") + pluginHardwareRequirements: PluginHardwareRequirements + repository: str | None = Field(None, title="Repository") + title: str = Field(..., title="Title") + 
ui: list[PluginUIInput | PluginUIOutput] = Field(..., title="Ui") + version: constr( + regex=r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", + ) = Field(..., examples=["0.1.0", "0.1.0rc1"], title="Version") + website: str | None = Field(None, title="Website") diff --git a/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py b/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py new file mode 100644 index 0000000..718d3a3 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py @@ -0,0 +1,233 @@ +# generated by datamodel-codegen: +# timestamp: 2023-01-04T14:54:38+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Any + +from pydantic import AnyUrl +from pydantic import BaseModel +from pydantic import Field +from pydantic import constr + + +class Type(Enum): + collection = "collection" + stitchingVector = "stitchingVector" + tensorflowModel = "tensorflowModel" + csvCollection = "csvCollection" + pyramid = "pyramid" + pyramidAnnotation = "pyramidAnnotation" + notebook = "notebook" + genericData = "genericData" + string = "string" + number = "number" + integer = "integer" + enum = "enum" + array = "array" + boolean = "boolean" + + +class Input(BaseModel): + name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( + ..., + description="Input name as expected by the plugin CLI", + examples=["inputImages", "fileNamePattern", "thresholdValue"], + title="Input name", + ) + type: Type = Field( + ..., + examples=["collection", "string", "number"], + title="Input Type", + ) + description: constr(regex=r"^(.*)$") = Field( + ..., + examples=["Input Images"], + title="Input description", + ) + required: bool | None = Field( + True, + description="Whether an input is required or not", + examples=[True], + title="Required input", + ) + + +class Type1(Enum): 
+ collection = "collection" + stitchingVector = "stitchingVector" + tensorflowModel = "tensorflowModel" + tensorboardLogs = "tensorboardLogs" + csvCollection = "csvCollection" + pyramid = "pyramid" + pyramidAnnotation = "pyramidAnnotation" + genericData = "genericData" + + +class Output(BaseModel): + name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( + ..., + examples=["outputCollection"], + title="Output name", + ) + type: Type1 = Field( + ..., + examples=["stitchingVector", "collection"], + title="Output type", + ) + description: constr(regex=r"^(.*)$") = Field( + ..., + examples=["Output collection"], + title="Output description", + ) + + +class UiItem(BaseModel): + key: Any | Any = Field( + ..., + description="Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", + examples=["inputs.inputImages", "inputs.fileNamPattern", "fieldsets"], + title="UI key", + ) + + +class CudaRequirements(BaseModel): + deviceMemoryMin: float | None = Field( + 0, + examples=[100], + title="Minimum device memory", + ) + cudaComputeCapability: str | list[Any] | None = Field( + None, + description="Specify either a single minimum value, or an array of valid values", + examples=["8.0", ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"]], + title="The cudaComputeCapability Schema", + ) + + +class ResourceRequirements(BaseModel): + ramMin: float | None = Field( + None, + examples=[2048], + title="Minimum RAM in mebibytes (Mi)", + ) + coresMin: float | None = Field( + None, + examples=[1], + title="Minimum number of CPU cores", + ) + cpuAVX: bool | None = Field( + False, + examples=[True], + title="Advanced Vector Extensions (AVX) CPU capability required", + ) + cpuAVX2: bool | None = Field( + False, + examples=[False], + title="Advanced Vector Extensions 2 (AVX2) CPU capability required", + ) + gpu: bool | None = Field( + False, + examples=[True], + 
title="GPU/accelerator required", + ) + cudaRequirements: CudaRequirements | None = Field( + {}, + examples=[{"deviceMemoryMin": 100, "cudaComputeCapability": "8.0"}], + title="GPU Cuda-related requirements", + ) + + +class WippPluginManifest(BaseModel): + name: constr(regex=r"^(.*)$", min_length=1) = Field( + ..., + description="Name of the plugin (format: org/name)", + examples=["wipp/plugin-example"], + title="Plugin name", + ) + version: constr(regex=r"^(.*)$", min_length=1) = Field( + ..., + description="Version of the plugin (semantic versioning preferred)", + examples=["1.0.0"], + title="Plugin version", + ) + title: constr(regex=r"^(.*)$", min_length=1) = Field( + ..., + description="Plugin title to display in WIPP forms", + examples=["WIPP Plugin example"], + title="Plugin title", + ) + description: constr(regex=r"^(.*)$", min_length=1) = Field( + ..., + examples=["Custom image segmentation plugin"], + title="Short description of the plugin", + ) + author: constr(regex="^(.*)$") | None | None = Field( + "", + examples=["FirstName LastName"], + title="Author(s)", + ) + institution: constr(regex="^(.*)$") | None | None = Field( + "", + examples=["National Institute of Standards and Technology"], + title="Institution", + ) + repository: AnyUrl | None | None = Field( + "", + examples=["https://github.com/usnistgov/WIPP"], + title="Source code repository", + ) + website: AnyUrl | None | None = Field( + "", + examples=["http://usnistgov.github.io/WIPP"], + title="Website", + ) + citation: constr(regex="^(.*)$") | None | None = Field( + "", + examples=[ + "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. 
Springer-Verlag International", + ], + title="Citation", + ) + containerId: constr(regex=r"^(.*)$") = Field( + ..., + description="Docker image ID", + examples=["docker.io/wipp/plugin-example:1.0.0"], + title="ContainerId", + ) + baseCommand: list[str] | None = Field( + None, + description="Base command to use while running container image", + examples=[["python3", "/opt/executable/main.py"]], + title="Base command", + ) + inputs: list[Input] = Field( + ..., + description="Defines inputs to the plugin", + title="List of Inputs", + unique_items=True, + ) + outputs: list[Output] = Field( + ..., + description="Defines the outputs of the plugin", + title="List of Outputs", + ) + ui: list[UiItem] = Field(..., title="Plugin form UI definition") + resourceRequirements: ResourceRequirements | None = Field( + {}, + examples=[ + { + "ramMin": 2048, + "coresMin": 1, + "cpuAVX": True, + "cpuAVX2": False, + "gpu": True, + "cudaRequirements": { + "deviceMemoryMin": 100, + "cudaComputeCapability": "8.0", + }, + }, + ], + title="Plugin Resource Requirements", + ) diff --git a/src/polus/tabular/_plugins/models/pydanticv1/__init__.py b/src/polus/tabular/_plugins/models/pydanticv1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/polus/tabular/_plugins/models/pydanticv1/compute.py b/src/polus/tabular/_plugins/models/pydanticv1/compute.py new file mode 100644 index 0000000..86e2c27 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv1/compute.py @@ -0,0 +1,28 @@ +"""Extending automatically generated compute model. + +This file modifies and extend certain fields and +functions of PolusComputeSchema.py which is automatically +generated by datamodel-codegen from JSON schema. 
+""" + +from polus.tabular._plugins.io import IOBase +from polus.tabular._plugins.io import Version +from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginInput +from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginOutput +from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginSchema + + +class PluginInput(PluginInput, IOBase): # type: ignore + """Base Class for Input Args.""" + + +class PluginOutput(PluginOutput, IOBase): # type: ignore + """Base Class for Output Args.""" + + +class PluginSchema(PluginSchema): # type: ignore + """Extended Compute Plugin Schema with extended IO defs.""" + + inputs: list[PluginInput] + outputs: list[PluginOutput] + version: Version diff --git a/src/polus/tabular/_plugins/models/pydanticv1/wipp.py b/src/polus/tabular/_plugins/models/pydanticv1/wipp.py new file mode 100644 index 0000000..402cba5 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv1/wipp.py @@ -0,0 +1,79 @@ +"""Extending automatically generated wipp model. + +This file modifies and extend certain fields and +functions of WIPPPluginSchema.py which is automatically +generated by datamodel-codegen from JSON schema. 
+"""
+from typing import Literal
+from typing import Optional
+from typing import Union
+
+from polus.tabular._plugins.io import Input
+from polus.tabular._plugins.io import Output
+from polus.tabular._plugins.io import Version
+from polus.tabular._plugins.models.pydanticv1.WIPPPluginSchema import (
+    ResourceRequirements,
+)
+from polus.tabular._plugins.models.pydanticv1.WIPPPluginSchema import WippPluginManifest
+from pydantic import BaseModel
+from pydantic import Field
+
+
+class UI1(BaseModel):
+    """Base class for UI items."""
+
+    key: str = Field(regex=r"^inputs.[a-zA-Z0-9][-a-zA-Z0-9]*$")
+    title: str
+    description: Optional[str]
+    condition: Optional[str]
+    default: Optional[Union[str, float, int, bool]]
+    hidden: Optional[bool] = Field(default=False)
+    bind: Optional[str]
+
+
+class FieldSet(BaseModel):
+    """Base class for FieldSet."""
+
+    title: str
+    fields: list[str] = Field(min_items=1, unique_items=True)
+
+
+class UI2(BaseModel):
+    """UI items class for fieldsets."""
+
+    key: Literal["fieldsets"]
+    fieldsets: list[FieldSet] = Field(min_items=1, unique_items=True)
+
+
+class WIPPPluginManifest(WippPluginManifest):
+    """Extended WIPP Plugin Schema."""
+
+    inputs: list[Input] = Field(
+        ...,
+        description="Defines inputs to the plugin",
+        title="List of Inputs",
+    )
+    outputs: list[Output] = Field(
+        ...,
+        description="Defines the outputs of the plugin",
+        title="List of Outputs",
+    )
+    ui: list[Union[UI1, UI2]] = Field(..., title="Plugin form UI definition")
+    version: Version
+    resourceRequirements: Optional[ResourceRequirements] = Field(  # noqa
+        None,
+        examples=[
+            {
+                "ramMin": 2048,
+                "coresMin": 1,
+                "cpuAVX": True,
+                "cpuAVX2": False,
+                "gpu": True,
+                "cudaRequirements": {
+                    "deviceMemoryMin": 100,
+                    "cudaComputeCapability": "8.0",
+                },
+            },
+        ],
+        title="Plugin Resource Requirements",
+    )
diff --git a/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py b/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py
new 
file mode 100644 index 0000000..d87a986 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py @@ -0,0 +1,136 @@ +# generated by datamodel-codegen: edited by Camilo Velez +# timestamp: 2022-09-21T03:41:58+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Annotated + +from pydantic import BaseModel +from pydantic import Field +from pydantic import StringConstraints + + +class PluginInputType(Enum): + path = "path" + string = "string" + number = "number" + array = "array" + boolean = "boolean" + + +class PluginInput(BaseModel): + format: str | None = Field(None, title="Format") + label: str | None = Field(None, title="Label") + name: str = Field(..., title="Name") + required: bool = Field(..., title="Required") + type: PluginInputType + default: str | float | bool | None = Field(None, title="Default") + + +class PluginOutputType(Enum): + path = "path" + + +class PluginOutput(BaseModel): + format: str | None = Field(None, title="Format") + label: str | None = Field(None, title="Label") + name: str = Field(..., title="Name") + type: PluginOutputType + + +class GpuVendor(Enum): + none = "none" + amd = "amd" + tpu = "tpu" + nvidia = "nvidia" + + +class PluginHardwareRequirements(BaseModel): + coresMax: str | float | None = Field(None, title="Coresmax") + coresMin: str | float | None = Field(None, title="Coresmin") + cpuAVX: bool | None = Field(None, title="Cpuavx") + cpuAVX2: bool | None = Field(None, title="Cpuavx2") + cpuMin: str | None = Field(None, title="Cpumin") + gpu: GpuVendor | None = None + gpuCount: float | None = Field(None, title="Gpucount") + gpuDriverVersion: str | None = Field(None, title="Gpudriverversion") + gpuType: str | None = Field(None, title="Gputype") + outDirMax: str | float | None = Field(None, title="Outdirmax") + outDirMin: str | float | None = Field(None, title="Outdirmin") + ramMax: str | float | None = Field(None, title="Rammax") + ramMin: str | float | None = 
Field(None, title="Rammin") + tmpDirMax: str | float | None = Field(None, title="Tmpdirmax") + tmpDirMin: str | float | None = Field(None, title="Tmpdirmin") + + +class ThenEntry(BaseModel): + action: str = Field(..., title="Action") + input: str = Field(..., title="Input") + value: str = Field(..., title="Value") + + +class ConditionEntry(BaseModel): + expression: str = Field(..., title="Expression") + + +class Validator(BaseModel): + then: list[ThenEntry] | None = Field(None, title="Then") + validator: list[ConditionEntry] | None = Field(None, title="Validator") + + +class PluginUIType(Enum): + checkbox = "checkbox" + color = "color" + date = "date" + email = "email" + number = "number" + password = "password" + radio = "radio" + range = "range" + text = "text" + time = "time" + + +class PluginUIInput(BaseModel): + bind: str | None = Field(None, title="Bind") + condition: list[Validator] | str | None = Field(None, title="Condition") + default: str | float | bool | None = Field(None, title="Default") + description: str | None = Field(None, title="Description") + fieldset: list[str] | None = Field(None, title="Fieldset") + hidden: bool | None = Field(None, title="Hidden") + key: str = Field(..., title="Key") + title: str = Field(..., title="Title") + type: PluginUIType + + +class PluginUIOutput(BaseModel): + description: str = Field(..., title="Description") + format: str | None = Field(None, title="Format") + name: str = Field(..., title="Name") + type: PluginUIType + website: str | None = Field(None, title="Website") + + +class PluginSchema(BaseModel): + author: str | None = Field(None, title="Author") + baseCommand: list[str] | None = Field(None, title="Basecommand") + citation: str | None = Field(None, title="Citation") + containerId: str = Field(..., title="Containerid") + customInputs: bool | None = Field(None, title="Custominputs") + description: str = Field(..., title="Description") + inputs: list[PluginInput] = Field(..., title="Inputs") + institution: str 
| None = Field(None, title="Institution") + name: str = Field(..., title="Name") + outputs: list[PluginOutput] = Field(..., title="Outputs") + pluginHardwareRequirements: PluginHardwareRequirements + repository: str | None = Field(None, title="Repository") + title: str = Field(..., title="Title") + ui: list[PluginUIInput | PluginUIOutput] = Field(..., title="Ui") + version: Annotated[ + str, + StringConstraints( + pattern=r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", + ), + ] = Field(..., examples=["0.1.0", "0.1.0rc1"], title="Version") + website: str | None = Field(None, title="Website") diff --git a/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py b/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py new file mode 100644 index 0000000..099cb32 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py @@ -0,0 +1,241 @@ +# generated by datamodel-codegen: edited by Camilo Velez +# timestamp: 2023-01-04T14:54:38+00:00 + +from __future__ import annotations + +from enum import Enum +from typing import Annotated +from typing import Any + +from pydantic import AnyUrl +from pydantic import BaseModel +from pydantic import Field +from pydantic import StringConstraints + + +class Type(Enum): + collection = "collection" + stitchingVector = "stitchingVector" + tensorflowModel = "tensorflowModel" + csvCollection = "csvCollection" + pyramid = "pyramid" + pyramidAnnotation = "pyramidAnnotation" + notebook = "notebook" + genericData = "genericData" + string = "string" + number = "number" + integer = "integer" + enum = "enum" + array = "array" + boolean = "boolean" + + +class Input(BaseModel): + name: Annotated[ + str, + StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), + ] = Field( + ..., + description="Input name as expected by the plugin CLI", + examples=["inputImages", 
"fileNamePattern", "thresholdValue"], + title="Input name", + ) + type: Type = Field( + ..., + examples=["collection", "string", "number"], + title="Input Type", + ) + description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( + ..., + examples=["Input Images"], + title="Input description", + ) + required: bool | None = Field( + True, + description="Whether an input is required or not", + examples=[True], + title="Required input", + ) + + +class Type1(Enum): + collection = "collection" + stitchingVector = "stitchingVector" + tensorflowModel = "tensorflowModel" + tensorboardLogs = "tensorboardLogs" + csvCollection = "csvCollection" + pyramid = "pyramid" + pyramidAnnotation = "pyramidAnnotation" + genericData = "genericData" + + +class Output(BaseModel): + name: Annotated[ + str, + StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), + ] = Field(..., examples=["outputCollection"], title="Output name") + type: Type1 = Field( + ..., + examples=["stitchingVector", "collection"], + title="Output type", + ) + description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( + ..., + examples=["Output collection"], + title="Output description", + ) + + +class UiItem(BaseModel): + key: Any | Any = Field( + ..., + description="Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. 
Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", + examples=["inputs.inputImages", "inputs.fileNamPattern", "fieldsets"], + title="UI key", + ) + + +class CudaRequirements(BaseModel): + deviceMemoryMin: float | None = Field( + 0, + examples=[100], + title="Minimum device memory", + ) + cudaComputeCapability: str | list[Any] | None = Field( + None, + description="Specify either a single minimum value, or an array of valid values", + examples=["8.0", ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"]], + title="The cudaComputeCapability Schema", + ) + + +class ResourceRequirements(BaseModel): + ramMin: float | None = Field( + None, + examples=[2048], + title="Minimum RAM in mebibytes (Mi)", + ) + coresMin: float | None = Field( + None, + examples=[1], + title="Minimum number of CPU cores", + ) + cpuAVX: bool | None = Field( + False, + examples=[True], + title="Advanced Vector Extensions (AVX) CPU capability required", + ) + cpuAVX2: bool | None = Field( + False, + examples=[False], + title="Advanced Vector Extensions 2 (AVX2) CPU capability required", + ) + gpu: bool | None = Field( + False, + examples=[True], + title="GPU/accelerator required", + ) + cudaRequirements: CudaRequirements | None = Field( + {}, + examples=[{"deviceMemoryMin": 100, "cudaComputeCapability": "8.0"}], + title="GPU Cuda-related requirements", + ) + + +class WippPluginManifest(BaseModel): + name: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( + ..., + description="Name of the plugin (format: org/name)", + examples=["wipp/plugin-example"], + title="Plugin name", + ) + version: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( + ..., + description="Version of the plugin (semantic versioning preferred)", + examples=["1.0.0"], + title="Plugin version", + ) + title: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( + ..., + description="Plugin title to display in WIPP forms", + 
examples=["WIPP Plugin example"], + title="Plugin title", + ) + description: Annotated[ + str, + StringConstraints(pattern=r"^(.*)$", min_length=1), + ] = Field( + ..., + examples=["Custom image segmentation plugin"], + title="Short description of the plugin", + ) + author: Annotated[str, StringConstraints(pattern="^(.*)$")] | None | None = Field( + "", + examples=["FirstName LastName"], + title="Author(s)", + ) + institution: Annotated[ + str, + StringConstraints(pattern="^(.*)$"), + ] | None | None = Field( + "", + examples=["National Institute of Standards and Technology"], + title="Institution", + ) + repository: AnyUrl | None | None = Field( + "", + examples=["https://github.com/usnistgov/WIPP"], + title="Source code repository", + ) + website: AnyUrl | None | None = Field( + "", + examples=["http://usnistgov.github.io/WIPP"], + title="Website", + ) + citation: Annotated[str, StringConstraints(pattern="^(.*)$")] | None | None = Field( + "", + examples=[ + "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. 
Springer-Verlag International", + ], + title="Citation", + ) + containerId: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( + ..., + description="Docker image ID", + examples=["docker.io/wipp/plugin-example:1.0.0"], + title="ContainerId", + ) + baseCommand: list[str] | None = Field( + None, + description="Base command to use while running container image", + examples=[["python3", "/opt/executable/main.py"]], + title="Base command", + ) + inputs: set[Input] = Field( + ..., + description="Defines inputs to the plugin", + title="List of Inputs", + ) + outputs: list[Output] = Field( + ..., + description="Defines the outputs of the plugin", + title="List of Outputs", + ) + ui: list[UiItem] = Field(..., title="Plugin form UI definition") + resourceRequirements: ResourceRequirements | None = Field( + {}, + examples=[ + { + "ramMin": 2048, + "coresMin": 1, + "cpuAVX": True, + "cpuAVX2": False, + "gpu": True, + "cudaRequirements": { + "deviceMemoryMin": 100, + "cudaComputeCapability": "8.0", + }, + }, + ], + title="Plugin Resource Requirements", + ) diff --git a/src/polus/tabular/_plugins/models/pydanticv2/__init__.py b/src/polus/tabular/_plugins/models/pydanticv2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/polus/tabular/_plugins/models/pydanticv2/compute.py b/src/polus/tabular/_plugins/models/pydanticv2/compute.py new file mode 100644 index 0000000..a7dc5b2 --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv2/compute.py @@ -0,0 +1,28 @@ +"""Extending automatically generated compute model. + +This file modifies and extend certain fields and +functions of PolusComputeSchema.py which is automatically +generated by datamodel-codegen from JSON schema. 
+""" + +from polus.tabular._plugins.io import IOBase +from polus.tabular._plugins.io import Version +from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginInput +from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginOutput +from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginSchema + + +class PluginInput(PluginInput, IOBase): # type: ignore + """Base Class for Input Args.""" + + +class PluginOutput(PluginOutput, IOBase): # type: ignore + """Base Class for Output Args.""" + + +class PluginSchema(PluginSchema): # type: ignore + """Extended Compute Plugin Schema with extended IO defs.""" + + inputs: list[PluginInput] + outputs: list[PluginOutput] + version: Version diff --git a/src/polus/tabular/_plugins/models/pydanticv2/wipp.py b/src/polus/tabular/_plugins/models/pydanticv2/wipp.py new file mode 100644 index 0000000..caf757e --- /dev/null +++ b/src/polus/tabular/_plugins/models/pydanticv2/wipp.py @@ -0,0 +1,79 @@ +"""Extending automatically generated wipp model. + +This file modifies and extend certain fields and +functions of WIPPPluginSchema.py which is automatically +generated by datamodel-codegen from JSON schema. 
+"""
+from typing import Literal
+from typing import Optional
+from typing import Union
+
+from polus.tabular._plugins.io import Input
+from polus.tabular._plugins.io import Output
+from polus.tabular._plugins.io import Version
+from polus.tabular._plugins.models.pydanticv2.WIPPPluginSchema import (
+    ResourceRequirements,
+)
+from polus.tabular._plugins.models.pydanticv2.WIPPPluginSchema import WippPluginManifest
+from pydantic import BaseModel
+from pydantic import Field
+
+
+class UI1(BaseModel):
+    """Base class for UI items."""
+
+    key: str = Field(pattern=r"^inputs.[a-zA-Z0-9][-a-zA-Z0-9]*$")
+    title: str
+    description: Optional[str] = None
+    condition: Optional[str] = None
+    default: Optional[Union[str, float, int, bool]] = None
+    hidden: Optional[bool] = Field(default=False)
+    bind: Optional[str] = None
+
+
+class FieldSet(BaseModel):
+    """Base class for FieldSet."""
+
+    title: str
+    fields: set[str] = Field(min_length=1)
+
+
+class UI2(BaseModel):
+    """UI items class for fieldsets."""
+
+    key: Literal["fieldsets"]
+    fieldsets: list[FieldSet] = Field(min_length=1)
+
+
+class WIPPPluginManifest(WippPluginManifest):
+    """Extended WIPP Plugin Schema."""
+
+    inputs: list[Input] = Field(
+        ...,
+        description="Defines inputs to the plugin",
+        title="List of Inputs",
+    )
+    outputs: list[Output] = Field(
+        ...,
+        description="Defines the outputs of the plugin",
+        title="List of Outputs",
+    )
+    ui: list[Union[UI1, UI2]] = Field(..., title="Plugin form UI definition")
+    version: Version
+    resourceRequirements: Optional[ResourceRequirements] = Field(  # noqa
+        None,
+        examples=[
+            {
+                "ramMin": 2048,
+                "coresMin": 1,
+                "cpuAVX": True,
+                "cpuAVX2": False,
+                "gpu": True,
+                "cudaRequirements": {
+                    "deviceMemoryMin": 100,
+                    "cudaComputeCapability": "8.0",
+                },
+            },
+        ],
+        title="Plugin Resource Requirements",
+    )
diff --git a/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json b/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json
new file mode 
100644 index 0000000..8a407ae --- /dev/null +++ b/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json @@ -0,0 +1,726 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "https://raw.githubusercontent.com/usnistgov/WIPP-Plugins-base-templates/master/plugin-manifest/schema/wipp-plugin-manifest-schema.json", + "type": "object", + "title": "WIPP Plugin manifest", + "default": null, + "required": [ + "name", + "version", + "title", + "description", + "containerId", + "inputs", + "outputs", + "ui" + ], + "properties": { + "name": { + "$id": "#/properties/name", + "type": "string", + "title": "Plugin name", + "description": "Name of the plugin (format: org/name)", + "default": "", + "examples": [ + "wipp/plugin-example" + ], + "minLength": 1, + "pattern": "^(.*)$" + }, + "version": { + "$id": "#/properties/version", + "type": "string", + "title": "Plugin version", + "description": "Version of the plugin (semantic versioning preferred)", + "default": "", + "examples": [ + "1.0.0" + ], + "minLength": 1, + "pattern": "^(.*)$" + }, + "title": { + "$id": "#/properties/title", + "type": "string", + "title": "Plugin title", + "description": "Plugin title to display in WIPP forms", + "default": "", + "examples": [ + "WIPP Plugin example" + ], + "minLength": 1, + "pattern": "^(.*)$" + }, + "description": { + "$id": "#/properties/description", + "type": "string", + "title": "Description", + "title": "Short description of the plugin", + "default": "", + "examples": [ + "Custom image segmentation plugin" + ], + "minLength": 1, + "pattern": "^(.*)$" + }, + "author": { + "$id": "#/properties/author", + "type": ["string", "null"], + "title": "Author(s)", + "default": "", + "examples": [ + "FirstName LastName" + ], + "pattern": "^(.*)$" + }, + "institution": { + "$id": "#/properties/institution", + "type": ["string", "null"], + "title": "Institution", + "default": "", + "examples": [ + "National Institute of Standards and Technology" + ], + "pattern": 
"^(.*)$" + }, + "repository": { + "$id": "#/properties/repository", + "type": ["string", "null"], + "title": "Source code repository", + "default": "", + "examples": [ + "https://github.com/usnistgov/WIPP" + ], + "format": "uri" + }, + "website": { + "$id": "#/properties/website", + "type": ["string", "null"], + "title": "Website", + "default": "", + "examples": [ + "http://usnistgov.github.io/WIPP" + ], + "format": "uri" + }, + "citation": { + "$id": "#/properties/citation", + "type": ["string", "null"], + "title": "Citation", + "default": "", + "examples": [ + "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. Springer-Verlag International" + ], + "pattern": "^(.*)$" + }, + "containerId": { + "$id": "#/properties/containerId", + "type": "string", + "title": "ContainerId", + "description": "Docker image ID", + "default": "", + "examples": [ + "docker.io/wipp/plugin-example:1.0.0" + ], + "pattern": "^(.*)$" + }, + "baseCommand": { + "$id": "#/properties/baseCommand", + "type": "array", + "title": "Base command", + "description": "Base command to use while running container image", + "default": null, + "items": { + "type": "string" + }, + "examples": [ + ["python3", "/opt/executable/main.py"] + ] + }, + "inputs": { + "$id": "#/properties/inputs", + "type": "array", + "title": "List of Inputs", + "description": "Defines inputs to the plugin", + "default": null, + "uniqueItems": true, + "items": { + "$id": "#/properties/inputs/items", + "type": "object", + "title": "Input", + "description": "Plugin input", + "default": null, + "required": [ + "name", + "type", + "description" + ], + "properties": { + "name": { + "$id": "#/properties/inputs/items/properties/name", + "type": "string", + "title": "Input name", + "description": "Input name as expected by the plugin CLI", + "default": "", + "examples": [ + "inputImages", + "fileNamePattern", + "thresholdValue" + ], + "pattern": "^[a-zA-Z0-9][-a-zA-Z0-9]*$" + }, + "type": { + "$id": 
"#/properties/inputs/items/properties/type", + "type": "string", + "enum": [ + "collection", + "stitchingVector", + "tensorflowModel", + "csvCollection", + "pyramid", + "pyramidAnnotation", + "notebook", + "genericData", + "string", + "number", + "integer", + "enum", + "array", + "boolean" + ], + "title": "Input Type", + "examples": [ + "collection", + "string", + "number" + ] + }, + "description": { + "$id": "#/properties/inputs/items/properties/description", + "type": "string", + "title": "Input description", + "examples": [ + "Input Images" + ], + "pattern": "^(.*)$" + }, + "required": { + "$id": "#/properties/inputs/items/properties/required", + "type": "boolean", + "title": "Required input", + "description": "Whether an input is required or not", + "default": true, + "examples": [ + true + ] + } + }, + "allOf": [ + { + "if": { + "properties": { "type": { "const": "enum" } } + }, + "then": { + "properties": + { + "options": + { + "$id": "#/properties/inputs/items/properties/options", + "type": "object", + "title": "Input options", + "properties": + { + "values": + { + "type": "array", + "description": "List of possible values", + "items": + { + "type": "string" + }, + "uniqueItems": true + } + } + } + } + } + }, + { + "if": { + "properties": { "type": { "const": "array" } } + }, + "then": { + "properties": + { + "options": + { + "$id": "#/properties/inputs/items/properties/options", + "type": "object", + "title": "Input options", + "properties": + { + "items": { + "$id": "#/properties/inputs/items/properties/options/properties/items", + "type": "object", + "title": "List of array items", + "description": "Possible values for the input array", + "default": {}, + "required": [ + "type", + "title", + "oneOf", + "default", + "widget", + "minItems", + "uniqueItems" + ], + "properties": { + "type": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/type", + "type": "string", + "title": "Items type", + "description": "Type of the 
items to be selected", + "enum": ["string"], + "examples": [ + "string" + ] + }, + "title": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/title", + "type": "string", + "title": "Selection title", + "description": "Title of the item selection section in the form", + "default": "", + "examples": [ + "Select feature" + ] + }, + "oneOf": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf", + "type": "array", + "title": "Possible items", + "description": "List of possible items", + "default": [], + "items": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items", + "type": "object", + "title": "Items definition", + "description": "Description of the possible items", + "default": {}, + "required": [ + "description", + "enum" + ], + "properties": { + "description": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/description", + "type": "string", + "title": "Description", + "description": "Description of the value that will appear in the form", + "default": "", + "examples": [ + "Area" + ] + }, + "enum": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/enum", + "type": "array", + "title": "Value", + "description": "Values of the selected item", + "default": [], + "items": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/enum/items", + "type": "string", + "title": "List of values", + "description": "List of values associated with the selected item (usually one value)", + "default": "", + "examples": [ + "Feature2DJava_Area" + ] + } + } + }, + "examples": [ + { + "description": "Area", + "enum": [ + "Feature2DJava_Area" + ] + }, + { + "enum": [ + "Feature2DJava_Mean" + ], + "description": "Mean" + } + ] + } + }, + "default": { + "$id": 
"#/properties/inputs/items/properties/options/properties/items/properties/default", + "type": "string", + "title": "Default value", + "description": "Value selected by default (must be one of the possible values)", + "default": "", + "examples": [ + "Feature2DJava_Area" + ] + }, + "widget": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/widget", + "type": "string", + "title": "Item selection widget", + "description": "How items can be selected (select -> dropdown list with add/remove buttons, checkbox -> multi-selection from list)", + "enum": ["select", "checkbox"], + "examples": [ + "select" + ] + }, + "minItems": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/minItems", + "type": "integer", + "title": "Minumum number of items", + "description": "Minumum number of items", + "default": 0, + "examples": [ + 1 + ] + }, + "uniqueItems": { + "$id": "#/properties/inputs/items/properties/options/properties/items/properties/uniqueItems", + "type": ["string", "boolean"], + "title": "Uniqueness of the items", + "description": "Whether items in the array have to be unique", + "examples": [ + "true", true + ] + } + }, + "examples": [ + { + "type": "string", + "widget": "select", + "uniqueItems": "true", + "default": "Feature2DJava_Area", + "minItems": 1, + "title": "Select feature", + "oneOf": [ + { + "description": "Area", + "enum": [ + "Feature2DJava_Area" + ] + }, + { + "description": "Mean", + "enum": [ + "Feature2DJava_Mean" + ] + } + ] + } + ] + } + } + } + } + } + } + ] + } + }, + "outputs": { + "$id": "#/properties/outputs", + "type": "array", + "title": "List of Outputs", + "description": "Defines the outputs of the plugin", + "default": null, + "items": { + "$id": "#/properties/outputs/items", + "type": "object", + "title": "Plugin output", + "default": null, + "required": [ + "name", + "type", + "description" + ], + "properties": { + "name": { + "$id": 
"#/properties/outputs/items/properties/name", + "type": "string", + "title": "Output name", + "default": "", + "examples": [ + "outputCollection" + ], + "pattern": "^[a-zA-Z0-9][-a-zA-Z0-9]*$" + }, + "type": { + "$id": "#/properties/outputs/items/properties/type", + "type": "string", + "enum": [ + "collection", + "stitchingVector", + "tensorflowModel", + "tensorboardLogs", + "csvCollection", + "pyramid", + "pyramidAnnotation", + "genericData" + ], + "title": "Output type", + "examples": [ + "stitchingVector", + "collection" + ] + }, + "description": { + "$id": "#/properties/outputs/items/properties/description", + "type": "string", + "title": "Output description", + "examples": [ + "Output collection" + ], + "pattern": "^(.*)$" + } + } + } + }, + "ui": { + "$id": "#/properties/ui", + "type": "array", + "title": "Plugin form UI definition", + "items": + { + "type": "object", + "title": "List of UI definitions", + "required": [ + "key" + ], + "properties": { + "key": { + "$id": "#/properties/ui/items/properties/key", + "type": "string", + "title": "UI key", + "description": "Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", + "examples": [ + "inputs.inputImages", "inputs.fileNamPattern", "fieldsets" + ], + "oneOf": [ + {"pattern": "^inputs\\.[a-zA-Z0-9][-a-zA-Z0-9]*$"}, + {"const": "fieldsets"} + ] + } + }, + "allOf": [ + { + "if": { + "properties": { "key": { "pattern": "^inputs\\.[a-zA-Z0-9][-a-zA-Z0-9]*$" } } + }, + "then": { + "properties": + { + "title": { + "$id": "#/properties/ui/items/properties/title", + "type": "string", + "title": "Input label", + "default": "", + "examples": [ + "Input images: " + ], + "pattern": "^(.*)$" + }, + "description": { + "$id": "#/properties/ui/items/properties/description", + "type": "string", + "title": "Input placeholder", + "default": "", + "examples": [ + "Pick a collection..." 
+ ], + "pattern": "^(.*)$" + }, + "condition": { + "$id": "#/properties/ui/items/properties/condition", + "type": "string", + "title": "Input visibility condition", + "description": "Definition of when this field is visible or not, depending on the value of another input, the expected format for the condition is 'model.inputs.inputName==value'", + "default": "", + "examples": [ + "model.inputs.thresholdtype=='Manual'" + ], + "pattern": "^(.*)$" + }, + "default": { + "$id": "#/properties/ui/items/properties/default", + "type": ["string", "number", "integer", "boolean"], + "title": "Input default value", + "default": "", + "examples": [ + 5, false, ".ome.tif" + ] + }, + "hidden": { + "$id": "#/properties/ui/items/properties/hidden", + "type": "boolean", + "title": "Hidden input", + "description": "Hidden input will not be displayed on the form, but can be used in conjunction with the 'default' or 'bind' properties to define default or automatically set parameters", + "default": false, + "examples": [ + true, false + ] + }, + "bind": { + "$id": "#/properties/ui/items/properties/bind", + "type": "string", + "title": "Bind input value to another input", + "examples": [ + "gridWidth" + ] + } + }, + "required": [ + "title" + ] + } + }, + { + "if": { + "properties": { "key": { "const": "fieldsets" } } + }, + "then": { + "properties": + { + "fieldsets": + { + "description": "A list of definitions representing sections of input fields.", + "type": "array", + "items": { + "description": "A section of input fields.", + "type": "object", + "properties": { + "title": { + "type": "string", + "description": "The label of the section.", + "examples": [ + "Input images selection" + ] + }, + "fields": { + "description": "A list of input names representing input fields that belong to this section.", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true, + "minItems": 1, + "examples": [ + "inputImages, fileNamePattern" + ] + } + }, + "uniqueItems": true, + 
"default": [], + "minItems": 1, + "required": [ + "title", "fields" + ] + } + } + }, + "required": [ + "fieldsets" + ] + } + } + ] + } + }, + "resourceRequirements": { + "type": "object", + "default": {}, + "title": "Plugin Resource Requirements", + "properties": { + "ramMin": { + "type": "number", + "title": "Minimum RAM in mebibytes (Mi)", + "examples": [ + 2048 + ] + }, + "coresMin": { + "type": "number", + "title": "Minimum number of CPU cores", + "examples": [ + 1 + ] + }, + "cpuAVX": { + "type": "boolean", + "default": false, + "title": "Advanced Vector Extensions (AVX) CPU capability required", + "examples": [ + true + ] + }, + "cpuAVX2": { + "type": "boolean", + "default": false, + "title": "Advanced Vector Extensions 2 (AVX2) CPU capability required", + "examples": [ + false + ] + }, + "gpu": { + "type": "boolean", + "default": false, + "title": "GPU/accelerator required", + "examples": [ + true + ] + }, + "cudaRequirements": { + "type": "object", + "default": {}, + "title": "GPU Cuda-related requirements", + "properties": { + "deviceMemoryMin": { + "type": "number", + "default": 0, + "title": "Minimum device memory", + "examples": [ + 100 + ] + }, + "cudaComputeCapability": { + "type": ["string", "array"], + "default": null, + "title": "The cudaComputeCapability Schema", + "description": "Specify either a single minimum value, or an array of valid values", + "examples": [ + "8.0", + ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"] + ] + } + }, + "examples": [{ + "deviceMemoryMin": 100, + "cudaComputeCapability": "8.0" + }] + } + }, + "examples": [{ + "ramMin": 2048, + "coresMin": 1, + "cpuAVX": true, + "cpuAVX2": false, + "gpu": true, + "cudaRequirements": { + "deviceMemoryMin": 100, + "cudaComputeCapability": "8.0" + } + }] + } + } + } diff --git a/src/polus/tabular/_plugins/registry.py b/src/polus/tabular/_plugins/registry.py new file mode 100644 index 0000000..dfb0daa --- /dev/null +++ b/src/polus/tabular/_plugins/registry.py @@ -0,0 +1,280 @@ +"""Methods to 
"""Methods to interact with REST API of WIPP Plugin Registry."""
import ast
import json
import logging
import typing
from urllib.parse import urljoin

import requests
import xmltodict
from tqdm import tqdm

from polus.tabular._plugins.classes import ComputePlugin, Plugin, refresh, submit_plugin
from polus.tabular._plugins.registry_utils import _generate_query, _to_xml

logger = logging.getLogger("polus.tabular")


class FailedToPublish(Exception):
    """Raised when there is an error publishing a resource."""


class MissingUserInfo(Exception):
    """Raised when necessary user info is not provided for authentication."""


class WippPluginRegistry:
    """Client for the REST API of a WIPP Plugin Registry instance."""

    def __init__(
        self,
        username: typing.Optional[str] = None,
        password: typing.Optional[str] = None,
        registry_url: str = "https://wipp-registry.ci.ncats.io",
        verify: bool = True,  # verify SSL?
    ) -> None:
        """Initialize WippPluginRegistry from username, password, registry url."""
        self.registry_url = registry_url
        self.username = username
        self.password = password
        self.verify = verify

    def _auth(self) -> typing.Optional[typing.Tuple[str, str]]:
        """Return (username, password) when both are set, else None.

        `requests` treats ``auth=None`` as an unauthenticated request, so this
        lets callers pass ``auth=self._auth()`` unconditionally.
        """
        if self.username and self.password:
            return (self.username, self.password)
        return None

    @classmethod
    def _parse_xml(cls, xml: str) -> typing.Optional[dict]:
        """Return the plugin manifest dict embedded in a registry XML record.

        Returns None when the embedded content cannot be decoded as a dict.
        """
        raw = xmltodict.parse(xml)["Resource"]["role"]["PluginManifest"][
            "PluginManifestContent"
        ]["#text"]
        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            # Some registry records store a Python dict repr instead of JSON.
            # The original used eval() here, which executes arbitrary code
            # from registry content; ast.literal_eval accepts literals only.
            try:
                parsed = ast.literal_eval(raw)
            except (ValueError, SyntaxError):
                return None
            return parsed if isinstance(parsed, dict) else None

    def update_plugins(self) -> None:
        """Fetch all Plugin records from the WIPP Registry and submit them locally."""
        url = self.registry_url + "/rest/data/query/"
        headers = {"Content-type": "application/json"}
        data = '{"query": {"$or":[{"Resource.role.type":"Plugin"},{"Resource.role.type.#text":"Plugin"}]}}'
        response = requests.post(
            url, headers=headers, data=data, auth=self._auth(), verify=self.verify
        )

        valid, invalid = 0, {}
        # The loop variable used to shadow the HTTP response (both named `r`).
        for record in tqdm(response.json()["results"], desc="Updating Plugins from WIPP"):
            try:
                manifest = WippPluginRegistry._parse_xml(record["xml_content"])
                submit_plugin(manifest)
                valid += 1
            except Exception as err:  # record the failure and keep going
                invalid.update({record["title"]: err.args[0] if err.args else str(err)})

        # Summary logging moved out of the loop: the original emitted these
        # messages from a `finally` block on every iteration.
        if invalid:
            self.invalid = invalid
            logger.debug(
                "Submitted %s plugins successfully. See WippPluginRegistry.invalid"
                " to check errors in unsubmitted plugins",
                valid,
            )
        else:
            logger.debug("Submitted %s plugins successfully.", valid)
        refresh()

    def query(
        self,
        title: typing.Optional[str] = None,
        version: typing.Optional[str] = None,
        title_contains: typing.Optional[str] = None,
        contains: typing.Optional[str] = None,
        query_all: bool = False,
        advanced: bool = False,
        query: typing.Optional[str] = None,
    ):
        """Query Plugins in WIPP Registry.

        This function executes queries for Plugins in the WIPP Registry.

        Args:
            title:
                title of the plugin to query.
                Example: "OME Tiled Tiff Converter"
            version:
                version of the plugins to query.
                Must follow semantic versioning. Example: "1.1.0"
            title_contains:
                keyword that must be part of the title of plugins to query.
                Example: "Converter" will return all plugins with the word
                "Converter" in their title
            contains:
                keyword that must be part of the description of plugins to query.
                Example: "bioformats" will return all plugins with the word
                "bioformats" in their description
            query_all: if True it will override any other parameter and will
                return all plugins
            advanced:
                if True it will override any other parameter.
                `query` must be included
            query: query to execute. This query must be in MongoDB format

        Returns:
            An array of the manifests of the Plugins returned by the query.

        Raises:
            requests.HTTPError: if the registry responds with an error status.
        """
        url = self.registry_url + "/rest/data/query/"
        headers = {"Content-type": "application/json"}
        query = _generate_query(
            title, version, title_contains, contains, query_all, advanced, query
        )

        # NOTE(review): naive quote replacement breaks on values containing
        # apostrophes; kept as-is because advanced queries may arrive as
        # pre-formatted strings — confirm before switching to json.dumps.
        data = '{"query": %s}' % str(query).replace("'", '"')

        response = requests.post(
            url, headers=headers, data=data, auth=self._auth(), verify=self.verify
        )
        # The original logged the *bound method* `r.raise_for_status` without
        # calling it, so HTTP errors surfaced only as a KeyError below.
        response.raise_for_status()
        return [
            WippPluginRegistry._parse_xml(x["xml_content"])
            for x in response.json()["results"]
        ]

    def get_current_schema(self) -> str:
        """Return the id of the current res-md.xsd schema template in WIPP."""
        response = requests.get(
            urljoin(
                self.registry_url,
                "rest/template-version-manager/global/?title=res-md.xsd",
            ),
            verify=self.verify,
        )
        response.raise_for_status()
        return response.json()[0]["current"]

    def upload(
        self,
        plugin: typing.Union[Plugin, ComputePlugin],
        author: typing.Optional[str] = None,
        email: typing.Optional[str] = None,
        publish: bool = True,
    ) -> str:
        """Upload Plugin to WIPP Registry.

        Author name and email attached to the registry record are taken from
        the `author` field of the Plugin manifest (first email, first two name
        words); both can be overridden via the arguments.

        Args:
            plugin:
                Plugin to be uploaded
            author:
                Optional `str` to override author name
            email:
                Optional `str` to override email
            publish:
                If `False`, Plugin will not be published to the public
                workspace. It will be visible only to the user uploading
                it. Default is `True`

        Returns:
            A message indicating a successful upload.

        Raises:
            MissingUserInfo: when username/password are not configured.
            FailedToPublish: when the publish step fails.
        """
        # Fail fast before doing any network work.
        if not (self.username and self.password):
            raise MissingUserInfo("The registry connection must be authenticated.")

        manifest = plugin.manifest
        xml_content = _to_xml(manifest, author, email)
        schema_id = self.get_current_schema()

        data = {
            "title": manifest["name"],
            "template": schema_id,
            "xml_content": xml_content,
        }

        url = self.registry_url + "/rest/data/"
        headers = {"Content-type": "application/json"}
        response = requests.post(
            url,
            headers=headers,
            data=json.dumps(data),
            auth=(self.username, self.password),
            verify=self.verify,
        )

        if response.status_code != 201:
            logger.error(
                "Error uploading file (%s), code %s",
                data["title"],
                response.status_code,
            )
            response.raise_for_status()

        if publish:
            _id = response.json()["id"]
            publish_url = url + _id + "/publish/"
            publish_response = requests.patch(
                publish_url,
                headers=headers,
                auth=(self.username, self.password),
                verify=self.verify,
            )
            try:
                publish_response.raise_for_status()
            # requests raises requests.HTTPError, NOT urllib.error.HTTPError;
            # the original except clause could never match, so FailedToPublish
            # was unreachable.
            except requests.HTTPError as err:
                raise FailedToPublish(
                    "Failed to publish {} with id {}".format(data["title"], _id)
                ) from err

        return "Successfully uploaded %s" % data["title"]

    def get_resource_by_pid(self, pid):
        """Return the registry resource identified by `pid`."""
        response = requests.get(pid, verify=self.verify)
        return response.json()

    def patch_resource(
        self,
        pid,
        version,
    ) -> None:
        """Patch the `version` field of an existing registry resource.

        Raises:
            MissingUserInfo: when username/password are not configured.
        """
        if self.username is None or self.password is None:
            raise MissingUserInfo("The registry connection must be authenticated.")

        # Get current version of the resource, then overwrite its version.
        data = self.get_resource_by_pid(pid)
        data.update({"version": version})

        # NOTE(review): `data` is sent form-encoded here while other endpoints
        # send JSON with a Content-type header — confirm against the registry
        # API before changing.
        response = requests.patch(
            urljoin(self.registry_url, "rest/data/" + data["id"]),
            data,
            auth=(self.username, self.password),
            verify=self.verify,
        )

        if response.status_code != 200:
            logger.error(
                "Error publishing data (%s), code %s",
                data["title"],
                response.status_code,
            )
            response.raise_for_status()
q["$and"].append( + { + "$or": [ + { + "Resource.content.description.#text": { + "$regex": f".*{contains}.*", + "$options": "i", + } + }, + { + "Resource.content.description": { + "$regex": f".*{contains}.*", + "$options": "i", + } + }, + ] + } + ) + if title_contains: + q["$and"].append( + { + "$or": [ + { + "Resource.identity.title.#text": { + "$regex": f".*{title_contains}.*", + "$options": "i", + } + }, + { + "Resource.identity.title": { + "$regex": f".*{title_contains}.*", + "$options": "i", + } + }, + ] + } + ) + return q + + +def _get_email(author: str): + regex = re.compile(r"[A-Za-z][A-Za-z0-9.]*@[A-Za-z0-9.]*") + return regex.search(author).group() + + +def _get_author(author: str): + return " ".join(author.split()[0:2]) + + +def _to_xml( + manifest: dict, + author: typing.Optional[str] = None, + email: typing.Optional[str] = None, +): + if email is None: + email = _get_email(manifest["author"]) + if author is None: + author = _get_author(manifest["author"]) + + xml = ( + '' + f'{manifest["name"]}' + f'{str(manifest["version"])}' + '' + f'{manifest["institution"]}' + '' + f'{author}' + f'{email}' + '' + f'{manifest["description"]}' + '' + 'Plugin' + f'{manifest["containerId"]}' + '' + f'{str(manifest)}' + ) + + return xml diff --git a/src/polus/tabular/_plugins/update/__init__.py b/src/polus/tabular/_plugins/update/__init__.py new file mode 100644 index 0000000..a6fef0d --- /dev/null +++ b/src/polus/tabular/_plugins/update/__init__.py @@ -0,0 +1,6 @@ +"""Initialize update module.""" + +from polus.tabular._plugins.update._update import update_nist_plugins +from polus.tabular._plugins.update._update import update_polus_plugins + +__all__ = ["update_polus_plugins", "update_nist_plugins"] diff --git a/src/polus/tabular/_plugins/update/_update.py b/src/polus/tabular/_plugins/update/_update.py new file mode 100644 index 0000000..4998ee5 --- /dev/null +++ b/src/polus/tabular/_plugins/update/_update.py @@ -0,0 +1,116 @@ +# pylint: disable=W1203, W1201 +import 
# pylint: disable=W1203, W1201
"""Scrapers that refresh local plugin manifests from GitHub repositories."""
import json
import logging
import re
import typing

from polus.tabular._plugins._compat import PYDANTIC_V2
from polus.tabular._plugins.classes import refresh
from polus.tabular._plugins.classes import submit_plugin
from polus.tabular._plugins.gh import _init_github
from polus.tabular._plugins.io import Version
from polus.tabular._plugins.manifests import _error_log
from polus.tabular._plugins.manifests import _scrape_manifests
from pydantic import ValidationError
from tqdm import tqdm

logger = logging.getLogger("polus.tabular")


def _manifest_name(plugin, manifest) -> str:
    """Best-effort display name for error logging, safe when `plugin` is None."""
    if plugin is not None:
        return plugin.name
    if isinstance(manifest, dict):
        return manifest.get("name", "<unknown>")
    return "<unknown>"


def update_polus_plugins(
    gh_auth: typing.Optional[str] = None,
    min_depth: int = 2,
    max_depth: int = 3,
) -> None:
    """Scrape PolusAI GitHub repo and create local versions of Plugins.

    Args:
        gh_auth: optional GitHub token passed to `_init_github`.
        min_depth: minimum directory depth to scrape for manifests.
        max_depth: maximum directory depth to scrape for manifests.
    """
    logger.info("Updating polus plugins.")
    # Get all manifests (both those that validated and those that did not).
    valid, invalid = _scrape_manifests(
        "polusai/polus-plugins",
        _init_github(gh_auth),
        min_depth,
        max_depth,
        True,
    )
    manifests = valid.copy()
    manifests.extend(invalid)
    logger.info(f"Submitting {len(manifests)} plugins.")

    for manifest in manifests:
        # Fix: `plugin` was referenced in the except handlers below even when
        # submit_plugin() itself raised, producing a NameError that masked the
        # original failure.
        plugin = None
        try:
            plugin = submit_plugin(manifest)

            # Parsing checks specific to polus-plugins
            error_list = []

            # Check that plugin version matches container version tag
            container_name, version = tuple(plugin.containerId.split(":"))
            version = Version(version) if PYDANTIC_V2 else Version(version=version)
            organization, container_name = tuple(container_name.split("/"))
            if plugin.version != version:
                msg = (
                    f"containerId version ({version}) does not "
                    f"match plugin version ({plugin.version})"
                )
                logger.error(msg)
                error_list.append(ValueError(msg))

            # Check to see that the plugin is registered to Labshare
            if organization not in ["polusai", "labshare"]:
                msg = (
                    "all polus plugin containers must be"
                    " under the Labshare organization."
                )
                logger.error(msg)
                error_list.append(ValueError(msg))

            # Checks for container name, they are somewhat related to our
            # Jenkins build
            if not container_name.startswith("polus"):
                msg = "containerId name must begin with polus-"
                logger.error(msg)
                error_list.append(ValueError(msg))

            if not container_name.endswith("plugin"):
                msg = "containerId name must end with -plugin"
                logger.error(msg)
                error_list.append(ValueError(msg))

            if len(error_list) > 0:
                # NOTE(review): constructing ValidationError directly differs
                # between pydantic v1 and v2 — confirm this call under the
                # pydantic version pinned by the project.
                raise ValidationError(error_list, plugin.__class__)

        except ValidationError as val_err:
            try:
                _error_log(val_err, manifest, "update_polus_plugins")
            except Exception as e:  # pylint: disable=W0718
                logger.exception(f"In {_manifest_name(plugin, manifest)}: {e}")
        except Exception as e:  # pylint: disable=W0718
            logger.exception(f"In {_manifest_name(plugin, manifest)}: {e}")
    refresh()


def update_nist_plugins(gh_auth: typing.Optional[str] = None) -> None:
    """Scrape NIST GitHub repo and create local versions of Plugins.

    Args:
        gh_auth: optional GitHub token passed to `_init_github`.
    """
    # Parse README links
    gh = _init_github(gh_auth)
    repo = gh.get_repo("usnistgov/WIPP")
    contents = repo.get_contents("plugins")
    readme = [r for r in contents if r.name == "README.md"][0]
    # Extracts the URL of every "[manifest](...)" markdown link in the README.
    pattern = re.compile(
        r"\[manifest\]\((https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*))\)",
    )
    matches = pattern.findall(str(readme.decoded_content))
    logger.info("Updating NIST plugins.")
    for match in tqdm(matches, desc="NIST Manifests"):
        # url_parts: [owner, repo, <tree/blob>, <branch>, path...]
        url_parts = match[0].split("/")[3:]
        plugin_repo = gh.get_repo("/".join(url_parts[:2]))
        manifest = json.loads(
            plugin_repo.get_contents("/".join(url_parts[4:])).decoded_content,
        )

        try:
            submit_plugin(manifest)

        except ValidationError as val_err:
            _error_log(val_err, manifest, "update_nist_plugins")
    refresh()
"""General utilities for polus-plugins."""


def name_cleaner(name: str) -> str:
    """Generate Plugin Class Name from Plugin name in manifest.

    Separator characters are treated as word boundaries, the words are
    title-cased and joined, and any remaining slash becomes an underscore.
    """
    cleaned = name
    for separator in "()<>-_":
        cleaned = cleaned.replace(separator, " ")
    return cleaned.title().replace(" ", "").replace("/", "_")


def cast_version(value):
    """Return Version object from version str or dict."""
    # Deferred import: Version lives in the project's io module.
    from polus.tabular._plugins.io import Version

    if isinstance(value, dict):  # if init from a Version object
        return Version(version=value["version"])
    return Version(version=value)
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection..." + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." 
+ }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/b2.json b/tests/resources/b2.json new file mode 100644 index 0000000..3e28f5b --- /dev/null +++ b/tests/resources/b2.json @@ -0,0 +1,76 @@ +{ + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "citation": "Peng et al. \"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image 
collection..." + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." + }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/b3.json b/tests/resources/b3.json new file mode 100644 index 0000000..f161974 --- /dev/null +++ b/tests/resources/b3.json @@ -0,0 +1,76 @@ +{ + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "title": "Flatfield correction using BaSiC algorithm.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection..." + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." 
+ }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/g1.json b/tests/resources/g1.json new file mode 100644 index 0000000..ca32f19 --- /dev/null +++ b/tests/resources/g1.json @@ -0,0 +1,78 @@ +{ + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "title": "Flatfield correction using BaSiC algorithm.", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection..." + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." 
+ }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/g2.json b/tests/resources/g2.json new file mode 100644 index 0000000..24d32be --- /dev/null +++ b/tests/resources/g2.json @@ -0,0 +1,77 @@ +{ + "version": "1.2.7", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "name": "BaSiC Flatfield Correction Plugin", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "repository": "https://github.com/polusai/polus-plugins", + "title": "Flatfield correction using BaSiC algorithm.", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection..." 
+ }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." + }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/g3.json b/tests/resources/g3.json new file mode 100644 index 0000000..e589644 --- /dev/null +++ b/tests/resources/g3.json @@ -0,0 +1,77 @@ +{ + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "title": "Flatfield correction using BaSiC algorithm.", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": true + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": true + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": true + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": false + } + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection..." + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets" + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values." 
+ }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image" + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image" + } + ] +} diff --git a/tests/resources/omeconverter022.json b/tests/resources/omeconverter022.json new file mode 100644 index 0000000..b696f46 --- /dev/null +++ b/tests/resources/omeconverter022.json @@ -0,0 +1,45 @@ +{ + "name": "OME Converter", + "version": "0.2.2", + "title": "OME Converter", + "description": "Convert Bioformats supported format to OME Zarr.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/labshare/polus-plugins", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "", + "containerId": "polusai/ome-converter-plugin:0.2.2", + "inputs": [ + { + "name": "inpDir", + "type": "genericData", + "description": "Input generic data collection to be processed by this plugin", + "required": true + }, + { + "name": "filePatter", + "type": "string", + "description": "A filepattern, used to select data to be converted", + "required": true + } + ], + "outputs": [ + { + "name": "outDir", + "type": "genericData", + "description": "Output collection" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input generic collection", + "description": "Input generic data collection to be processed by this plugin" + }, + { + "key": "inputs.filePattern", + "title": "Filepattern", + "description": "A filepattern, used to select data for conversion" + } + ] +} diff --git a/tests/resources/tabularconverter.json b/tests/resources/tabularconverter.json new file mode 100644 index 0000000..ea9cfbb --- /dev/null +++ b/tests/resources/tabularconverter.json @@ -0,0 +1,75 @@ +{ + "name": "Tabular Converter", + 
"version": "0.1.2-dev1", + "title": "Tabular Converter", + "description": "WIPP plugin allows tabular data conversion arrow file format and vice versa.", + "author": "Kelechi Nina Mezu (nina.mezu@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", + "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/PolusAI/tabular-tools", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "", + "containerId": "polusai/tabular-converter-tool:0.1.2-dev1", + "baseCommand": [ + "python3", + "-m", + "polus.tabular.formats.tabular_converter" + ], + "inputs": [ + { + "name": "inpDir", + "type": "genericData", + "description": "Input data collection to be processed by this plugin", + "required": true + }, + { + "name": "filePattern", + "type": "string", + "description": "Pattern to parse input files", + "required": false + }, + { + "name": "fileExtension", + "type": "enum", + "description": "File format of an output file", + "required": true, + "options": { + "values": [ + ".csv", + ".fits", + ".fcs", + ".feather", + ".parquet", + ".hdf5", + ".arrow", + "default" + ] + } + } + ], + "outputs": [ + { + "name": "outDir", + "type": "genericData", + "description": "Output directory" + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input collection", + "description": "Input data collection to be processed by this plugin" + }, + { + "key": "inputs.filePattern", + "title": "FilePattern", + "description": "Pattern to parse input files", + "default": ".+" + }, + { + "key": "inputs.fileExtension", + "title": "FileExtension", + "description": "Desired file format of an ouput file", + "default": ".arrow" + } + ] +} \ No newline at end of file diff --git a/formats/tabular-to-arrow-tool/tabulartoarrow.cwl b/tests/resources/target1.cwl similarity index 78% rename from formats/tabular-to-arrow-tool/tabulartoarrow.cwl rename to tests/resources/target1.cwl index 
10eb175..9185bee 100644 --- a/formats/tabular-to-arrow-tool/tabulartoarrow.cwl +++ b/tests/resources/target1.cwl @@ -1,10 +1,14 @@ class: CommandLineTool cwlVersion: v1.2 inputs: + fileExtension: + inputBinding: + prefix: --fileExtension + type: string filePattern: inputBinding: prefix: --filePattern - type: string + type: string? inpDir: inputBinding: prefix: --inpDir @@ -20,7 +24,7 @@ outputs: type: Directory requirements: DockerRequirement: - dockerPull: polusai/tabular-to-arrow-tool:0.2.3-dev0 + dockerPull: polusai/tabular-converter-tool:0.1.2-dev1 InitialWorkDirRequirement: listing: - entry: $(inputs.outDir) diff --git a/tests/test_cwl.py b/tests/test_cwl.py new file mode 100644 index 0000000..56cb847 --- /dev/null +++ b/tests/test_cwl.py @@ -0,0 +1,105 @@ +# type: ignore +# pylint: disable=W0621, W0613 +"""Tests for CWL utils.""" +from pathlib import Path + +import pydantic +import pytest +import yaml + +import polus.tabular as pp +from polus.tabular._plugins.classes.plugin_base import MissingInputValuesError + +PYDANTIC_VERSION = pydantic.__version__.split(".")[0] +RSRC_PATH = Path(__file__).parent.joinpath("resources") + +TabularConverter = RSRC_PATH.joinpath("tabularconverter.json") + + +@pytest.fixture +def submit_plugin(): + """Submit TabularConverter plugin.""" + if "TabularConverter" not in pp.list: + pp.submit_plugin(TabularConverter) + else: + if "0.1.2-dev1" not in pp.TabularConverter.versions: + pp.submit_plugin(TabularConverter) + + +@pytest.fixture +def plug(submit_plugin): + """Get TabularConverter plugin.""" + return pp.get_plugin("TabularConverter", "0.1.2-dev1") + + +@pytest.fixture(scope="session") +def cwl_io_path(tmp_path_factory): + """Temp CWL IO path.""" + return tmp_path_factory.mktemp("io") / "tabularconverter_io.yml" + + +@pytest.fixture(scope="session") +def cwl_path(tmp_path_factory): + """Temp CWL IO path.""" + return tmp_path_factory.mktemp("cwl") / "tabularconverter.cwl" + + +@pytest.fixture +def cwl_io(plug, cwl_io_path): + 
"""Test save_cwl IO.""" + rs_path = RSRC_PATH.absolute() + plug.inpDir = rs_path + plug.filePattern = ".*.csv" + plug.fileExtension = ".arrow" + plug.outDir = rs_path + plug.save_cwl_io(cwl_io_path) + + +def test_save_read_cwl(plug, cwl_path): + """Test save and read cwl.""" + plug.save_cwl(cwl_path) + with open(cwl_path, encoding="utf-8") as file: + src_cwl = file.read() + with open(RSRC_PATH.joinpath("target1.cwl"), encoding="utf-8") as file: + target_cwl = file.read() + assert src_cwl == target_cwl + + +def test_save_cwl_io_not_inp(plug, cwl_io_path): + """Test save_cwl IO.""" + with pytest.raises(MissingInputValuesError): + plug.save_cwl_io(cwl_io_path) + + +def test_save_cwl_io_not_inp2(plug, cwl_io_path): + """Test save_cwl IO.""" + plug.inpDir = RSRC_PATH.absolute() + plug.filePattern = "img_r{rrr}_c{ccc}.tif" + with pytest.raises(MissingInputValuesError): + plug.save_cwl_io(cwl_io_path) + + +def test_save_cwl_io_not_yml(plug, cwl_io_path): + """Test save_cwl IO.""" + plug.inpDir = RSRC_PATH.absolute() + plug.filePattern = ".*.csv" + plug.fileExtension = ".arrow" + plug.outDir = RSRC_PATH.absolute() + with pytest.raises(ValueError): + plug.save_cwl_io(cwl_io_path.with_suffix(".txt")) + + +def test_read_cwl_io(cwl_io, cwl_io_path): + """Test read_cwl_io.""" + with open(cwl_io_path, encoding="utf-8") as file: + src_io = yaml.safe_load(file) + assert src_io["inpDir"] == { + "class": "Directory", + "location": str(RSRC_PATH.absolute()), + } + assert src_io["outDir"] == { + "class": "Directory", + "location": str(RSRC_PATH.absolute()), + } + assert src_io["filePattern"] == ".*.csv" + assert src_io["fileExtension"] == ".arrow" diff --git a/tests/test_io.py b/tests/test_io.py new file mode 100644 index 0000000..5686e0e --- /dev/null +++ b/tests/test_io.py @@ -0,0 +1,69 @@ +# pylint: disable=C0103 +"""IO Tests.""" +from pathlib import Path + +import pytest +from fsspec.implementations.local import LocalFileSystem + +from polus.tabular._plugins.classes import 
_load_plugin +from polus.tabular._plugins.classes.plugin_base import IOKeyError +from polus.tabular._plugins.io import Input, IOBase + +RSRC_PATH = Path(__file__).parent.joinpath("resources") + +io1 = { + "type": "collection", + "name": "input1", + "required": True, + "description": "Test IO", +} +io2 = {"type": "boolean", "name": "input2", "required": True, "description": "Test IO"} +iob1 = { + "type": "collection", +} +plugin = _load_plugin(RSRC_PATH.joinpath("g1.json")) + + +def test_iobase(): + """Test IOBase.""" + IOBase(**iob1) + + +@pytest.mark.parametrize("io", [io1, io2], ids=["io1", "io2"]) +def test_input(io): + """Test Input.""" + Input(**io) + + +def test_set_attr_invalid1(): + """Test setting invalid attribute.""" + with pytest.raises(TypeError): + plugin.inputs[0].examples = [2, 5] + + +def test_set_attr_invalid2(): + """Test setting invalid attribute.""" + with pytest.raises(IOKeyError): + plugin.invalid = False + + +def test_set_attr_valid1(): + """Test setting valid attribute.""" + i = [x for x in plugin.inputs if x.name == "darkfield"] + i[0].value = True + + +def test_set_attr_valid2(): + """Test setting valid attribute.""" + plugin.darkfield = True + + +def test_set_fsspec(): + """Test setting fs valid attribute.""" + plugin._fs = LocalFileSystem() # pylint: disable=protected-access + + +def test_set_fsspec2(): + """Test setting fs invalid attribute.""" + with pytest.raises(ValueError): + plugin._fs = "./" # pylint: disable=protected-access diff --git a/tests/test_manifests.py b/tests/test_manifests.py new file mode 100644 index 0000000..52f2ea9 --- /dev/null +++ b/tests/test_manifests.py @@ -0,0 +1,236 @@ +# pylint: disable=C0103 +"""Test manifests utils.""" +from collections import OrderedDict +from pathlib import Path + +import pytest + +from polus.tabular._plugins.classes import PLUGINS, list_plugins +from polus.tabular._plugins.manifests import ( + InvalidManifestError, + _load_manifest, + validate_manifest, +) +from 
polus.tabular._plugins.models import ComputeSchema, WIPPPluginManifest + +RSRC_PATH = Path(__file__).parent.joinpath("resources") + +d_val = { + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "title": "Flatfield correction using BaSiC algorithm.", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": 'Peng et al. "A BaSiC tool for background and shading correction of optical microscopy images" Nature Communications (2017)', + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": True, + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": True, + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": True, + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": True, + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": False, + }, + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin", + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection...", + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets", + }, + { + "key": "inputs.groupBy", + 
"title": "Grouping Variables: ", + "description": "Group data together with varying variable values.", + }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image", + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image", + }, + ], +} + +test_dict_load = OrderedDict( + { + "dictionary": { + "name": "BaSiC Flatfield Correction Plugin", + "version": "1.2.7", + "title": "Flatfield correction using BaSiC algorithm.", + "description": "Generates images used for flatfield correction using the BaSiC algorithm.", + "author": "Nick Schaub (nick.schaub@nih.gov)", + "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/polusai/polus-plugins", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": 'Peng et al. 
"A BaSiC tool for background and shading correction of optical microscopy images" Nature Communications (2017)', + "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", + "inputs": [ + { + "name": "inpDir", + "type": "collection", + "description": "Input image collection.", + "required": True, + }, + { + "name": "filePattern", + "type": "string", + "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", + "required": True, + }, + { + "name": "darkfield", + "type": "boolean", + "description": "Calculate darkfield image.", + "required": True, + }, + { + "name": "photobleach", + "type": "boolean", + "description": "Calculate photobleaching offsets.", + "required": True, + }, + { + "name": "groupBy", + "type": "string", + "description": "Group images together for flatfield by variable.", + "required": False, + }, + ], + "outputs": [ + { + "name": "outDir", + "type": "collection", + "description": "Output data for the plugin", + } + ], + "ui": [ + { + "key": "inputs.inpDir", + "title": "Input image collection: ", + "description": "Image collection...", + }, + { + "key": "inputs.filePattern", + "title": "Filename pattern: ", + "description": "Use a filename pattern to calculate flatfield information by subsets", + }, + { + "key": "inputs.groupBy", + "title": "Grouping Variables: ", + "description": "Group data together with varying variable values.", + }, + { + "key": "inputs.darkfield", + "title": "Calculate darkfield: ", + "description": "If selected, will generate a darkfield image", + }, + { + "key": "inputs.photobleach", + "title": "Calclate photobleaching offset: ", + "description": "If selected, will generate an offset scalar for each image", + }, + ], + }, + "path": RSRC_PATH.joinpath("g1.json"), + } +) + +REPO_PATH = RSRC_PATH.parent.parent +LOCAL_MANIFESTS = list(REPO_PATH.rglob("*plugin.json")) +LOCAL_MANIFESTS = [ + x for x in LOCAL_MANIFESTS if "cookiecutter.project" not in str(x) +] # filter cookiecutter 
templates +LOCAL_MANIFEST_NAMES = [str(x) for x in LOCAL_MANIFESTS] + + +def _get_path(manifest): + """Return path of local plugin manifest.""" + return PLUGINS[manifest][max(PLUGINS[manifest])] + + +# @pytest.mark.repo +# @pytest.mark.parametrize("manifest", LOCAL_MANIFESTS, ids=LOCAL_MANIFEST_NAMES) +# def test_manifests_local(manifest): +# """Test local (repo) manifests.""" +# assert isinstance(validate_manifest(manifest), (WIPPPluginManifest, ComputeSchema)) + + +def test_list_plugins(): + """Test `list_plugins()`.""" + o = list(PLUGINS.keys()) + o.sort() + assert o == list_plugins() + + +@pytest.mark.parametrize("manifest", list_plugins(), ids=list_plugins()) +def test_manifests_plugindir(manifest): + """Test manifests available in polus-plugins installation dir.""" + p = _get_path(manifest) + assert isinstance(validate_manifest(p), (WIPPPluginManifest, ComputeSchema)) + + +@pytest.mark.parametrize("type_", test_dict_load.values(), ids=test_dict_load.keys()) +def test_load_manifest(type_): # test path and dict + """Test _load_manifest() for types path and dict.""" + assert _load_manifest(type_) == d_val + + +bad = [f"b{x}.json" for x in [1, 2, 3]] +good = [f"g{x}.json" for x in [1, 2, 3]] + + +@pytest.mark.parametrize("manifest", bad, ids=bad) +def test_bad_manifest(manifest): + """Test bad manifests raise InvalidManifest error.""" + with pytest.raises(InvalidManifestError): + validate_manifest(REPO_PATH.joinpath("tests", "resources", manifest)) + + +@pytest.mark.parametrize("manifest", good, ids=good) +def test_good_manifest(manifest): + """Test different manifests that all should pass validation.""" + p = RSRC_PATH.joinpath(manifest) + assert isinstance(validate_manifest(p), (WIPPPluginManifest, ComputeSchema)) diff --git a/tests/test_plugins.py b/tests/test_plugins.py new file mode 100644 index 0000000..0e02dd6 --- /dev/null +++ b/tests/test_plugins.py @@ -0,0 +1,198 @@ +# type: ignore +# pylint: disable=C0116, W0621, W0613 +"""Plugin Object Tests.""" +from 
pathlib import Path + +import pytest + +import polus.tabular as pp +from polus.tabular._plugins.classes import Plugin, _load_plugin + +RSRC_PATH = Path(__file__).parent.joinpath("resources") +OMECONVERTER = RSRC_PATH.joinpath("omeconverter022.json") +BASIC_131 = ( + "https://raw.githubusercontent.com/PolusAI/polus-plugins/" + "e8f23a3661e3e5f7ad7dc92f4b0d9c31e7076589/regression/" + "polus-basic-flatfield-correction-plugin/plugin.json" +) +BASIC_127 = ( + "https://raw.githubusercontent.com/PolusAI/polus-plugins/" + "440e64a51a578e21b574009424a75c848ebbbb03/regression/polus-basic" + "-flatfield-correction-plugin/plugin.json" +) + + +@pytest.fixture +def remove_all(): + """Remove all plugins.""" + pp.remove_all() + + +def test_empty_list(remove_all): + """Test empty list.""" + assert pp.list == [] + + +def test_submit_plugin(remove_all): + """Test submit_plugin.""" + pp.submit_plugin(OMECONVERTER) + assert pp.list == ["OmeConverter"] + + +@pytest.fixture +def submit_omeconverter(): + pp.submit_plugin(OMECONVERTER) + + +@pytest.fixture +def submit_basic131(): + pp.submit_plugin(BASIC_131) + + +@pytest.fixture +def submit_basic127(): + pp.submit_plugin(BASIC_127) + + +def test_get_plugin(submit_omeconverter): + """Test get_plugin.""" + assert isinstance(pp.get_plugin("OmeConverter"), Plugin) + + +def test_url1(submit_omeconverter, submit_basic131): + """Test url submit.""" + assert sorted(pp.list) == ["BasicFlatfieldCorrectionPlugin", "OmeConverter"] + + +def test_url2(submit_omeconverter, submit_basic131, submit_basic127): + """Test url submit.""" + assert sorted(pp.list) == ["BasicFlatfieldCorrectionPlugin", "OmeConverter"] + + +def test_load_plugin(submit_omeconverter): + """Test load_plugin.""" + assert _load_plugin(OMECONVERTER).name == "OME Converter" + + +def test_load_plugin2(submit_basic131): + """Test load_plugin.""" + assert _load_plugin(BASIC_131).name == "BaSiC Flatfield Correction Plugin" + + +def test_attr1(submit_omeconverter): + """Test attributes.""" + 
p_attr = pp.OmeConverter + p_get = pp.get_plugin("OmeConverter") + for attr in p_get.__dict__: + if attr == "id": + continue + assert getattr(p_attr, attr) == getattr(p_get, attr) + + +def test_attr2(submit_basic131): + """Test attributes.""" + p_attr = pp.BasicFlatfieldCorrectionPlugin + p_get = pp.get_plugin("BasicFlatfieldCorrectionPlugin") + for attr in p_get.__dict__: + if attr == "id": + continue + assert getattr(p_attr, attr) == getattr(p_get, attr) + + +def test_versions(submit_basic131, submit_basic127): + """Test versions.""" + assert sorted(pp.get_plugin("BasicFlatfieldCorrectionPlugin").versions) == [ + "1.2.7", + "1.3.1", + ] + + +def test_get_max_version1(submit_basic131, submit_basic127): + """Test get max version.""" + plug = pp.get_plugin("BasicFlatfieldCorrectionPlugin") + assert plug.version == "1.3.1" + + +def test_get_max_version2(submit_basic131, submit_basic127): + """Test get max version.""" + plug = pp.BasicFlatfieldCorrectionPlugin + assert plug.version == "1.3.1" + + +def test_get_specific_version(submit_basic131, submit_basic127): + """Test get specific version.""" + plug = pp.get_plugin("BasicFlatfieldCorrectionPlugin", "1.2.7") + assert plug.version == "1.2.7" + + +def test_remove_version(submit_basic131, submit_basic127): + """Test remove version.""" + pp.remove_plugin("BasicFlatfieldCorrectionPlugin", "1.2.7") + assert pp.BasicFlatfieldCorrectionPlugin.versions == ["1.3.1"] + + +def test_remove_all_versions_plugin( + submit_basic131, submit_basic127, submit_omeconverter +): + """Test remove all versions plugin.""" + pp.remove_plugin("BasicFlatfieldCorrectionPlugin") + assert pp.list == ["OmeConverter"] + + +def test_submit_str_1(): + """Test submit_plugin with string.""" + pp.remove_all() + pp.submit_plugin(str(OMECONVERTER)) + assert pp.list == ["OmeConverter"] + + +def test_submit_str_2(): + """Test submit_plugin with string.""" + pp.remove_all() + pp.submit_plugin(str(OMECONVERTER.absolute())) + assert pp.list == ["OmeConverter"] 
+ + +@pytest.fixture +def plug1(): + """Configure the class.""" + pp.submit_plugin(BASIC_131) + plug1 = pp.BasicFlatfieldCorrectionPlugin + plug1.inpDir = RSRC_PATH.absolute() + plug1.outDir = RSRC_PATH.absolute() + plug1.filePattern = "*.ome.tif" + plug1.darkfield = True + plug1.photobleach = False + return plug1 + + +@pytest.fixture(scope="session") +def config_path(tmp_path_factory): + """Temp config path.""" + return tmp_path_factory.mktemp("config") / "config1.json" + + +def test_save_load_config(plug1, config_path): + """Test save_config, load_config from config file.""" + plug1.save_config(config_path) + plug2 = pp.load_config(config_path) + for i_o in ["inpDir", "outDir", "filePattern"]: + assert getattr(plug2, i_o) == getattr(plug1, i_o) + assert plug2.id == plug1.id + + +def test_load_config_no_plugin(plug1, config_path): + """Test load_config after removing plugin.""" + plug1.save_config(config_path) + plug1_id = plug1.id + pp.remove_plugin("BasicFlatfieldCorrectionPlugin") + assert pp.list == ["OmeConverter"] + plug2 = pp.load_config(config_path) + assert isinstance(plug2, Plugin) + assert plug2.id == plug1_id + + +def test_remove_all(submit_basic131, submit_basic127, submit_omeconverter): + """Test remove_all.""" + pp.remove_all() + assert pp.list == [] diff --git a/tests/test_version.py b/tests/test_version.py new file mode 100644 index 0000000..346b67b --- /dev/null +++ b/tests/test_version.py @@ -0,0 +1,171 @@ +"""Test Version object and cast_version utility function.""" +import pydantic +import pytest +from pydantic import ValidationError + +from polus.tabular._plugins.io import Version +from polus.tabular._plugins.utils import cast_version + +PYDANTIC_VERSION = pydantic.__version__.split(".", maxsplit=1)[0] + +GOOD_VERSIONS = [ + "1.2.3", + "1.4.7-rc1", + "4.1.5", + "12.8.3", + "10.2.0", + "2.2.3-dev5", + "0.3.4", + "0.2.34-rc23", +] +BAD_VERSIONS = ["02.2.3", "002.2.3", "1.2", "1.0", "1.03.2", "23.3.03", "d.2.4"] + +PV = PYDANTIC_VERSION 
+print(PV) + + +@pytest.mark.parametrize("ver", GOOD_VERSIONS, ids=GOOD_VERSIONS) +def test_version(ver): + """Test Version pydantic model.""" + if PV == "1": + assert isinstance(Version(version=ver), Version) + assert isinstance(Version(ver), Version) + + +@pytest.mark.skipif(int(PV) > 1, reason="requires pydantic 1") +@pytest.mark.parametrize("ver", GOOD_VERSIONS, ids=GOOD_VERSIONS) +def test_cast_version(ver): + """Test cast_version utility function.""" + assert isinstance(cast_version(ver), Version) + + +@pytest.mark.parametrize("ver", BAD_VERSIONS, ids=BAD_VERSIONS) +def test_bad_version1(ver): + """Test ValidationError is raised for invalid versions.""" + if PV == "1": + with pytest.raises(ValidationError): + assert isinstance(cast_version(ver), Version) + with pytest.raises(ValidationError): + assert isinstance(Version(ver), Version) + + +MAJOR_VERSION_EQUAL = ["2.4.3", "2.98.28", "2.1.2", "2.0.0", "2.4.0"] +MINOR_VERSION_EQUAL = ["1.3.3", "7.3.4", "98.3.12", "23.3.0", "1.3.5"] +PATCH_EQUAL = ["12.2.7", "2.3.7", "1.7.7", "7.7.7", "7.29.7"] + + +@pytest.mark.parametrize("ver", MAJOR_VERSION_EQUAL, ids=MAJOR_VERSION_EQUAL) +def test_major(ver): + """Test major version.""" + if PV == "2": + assert Version(ver).major == 2 + else: + assert cast_version(ver).major == 2 + + +@pytest.mark.parametrize("ver", MINOR_VERSION_EQUAL, ids=MINOR_VERSION_EQUAL) +def test_minor(ver): + """Test minor version.""" + if PV == "2": + assert Version(ver).minor == 3 + else: + assert cast_version(ver).minor == 3 + + +@pytest.mark.parametrize("ver", PATCH_EQUAL, ids=PATCH_EQUAL) +def test_patch(ver): + """Test patch version.""" + if PV == "2": + assert Version(ver).patch == 7 + else: + assert cast_version(ver).patch == 7 + + +def test_gt1(): + """Test greater than operator.""" + if PV == "2": + assert Version("1.2.3") > Version("1.2.1") + else: + assert cast_version("1.2.3") > cast_version("1.2.1") + + +def test_gt2(): + """Test greater than operator.""" + if PV == "2": + assert 
Version("5.7.3") > Version("5.6.3") + else: + assert cast_version("5.7.3") > cast_version("5.6.3") + + +def test_st1(): + """Test less than operator.""" + if PV == "2": + assert Version("5.7.3") < Version("5.7.31") + else: + assert cast_version("5.7.3") < cast_version("5.7.31") + + +def test_st2(): + """Test less than operator.""" + if PV == "2": + assert Version("1.0.2") < Version("2.0.2") + else: + assert cast_version("1.0.2") < cast_version("2.0.2") + + +def test_eq1(): + """Test equality operator.""" + if PV == "2": + assert Version("1.3.3") == Version("1.3.3") + else: + assert Version(version="1.3.3") == cast_version("1.3.3") + + +def test_eq2(): + """Test equality operator.""" + if PV == "2": + assert Version("5.4.3") == Version("5.4.3") + else: + assert Version(version="5.4.3") == cast_version("5.4.3") + + +def test_eq3(): + """Test equality operator.""" + if PV == "2": + assert Version("1.3.3") != Version("1.3.8") + else: + assert Version(version="1.3.3") != cast_version("1.3.8") + + +def test_eq_str1(): + """Test equality with str.""" + if PV == "2": + assert Version("1.3.3") == "1.3.3" + else: + assert Version(version="1.3.3") == "1.3.3" + + +def test_lt_str1(): + """Test equality with str.""" + if PV == "2": + assert Version("1.3.3") < "1.5.3" + else: + assert Version(version="1.3.3") < "1.5.3" + + +def test_gt_str1(): + """Test equality with str.""" + if PV == "2": + assert Version("4.5.10") > "4.5.9" + else: + assert Version(version="4.5.10") > "4.5.9" + + +def test_eq_no_str(): + """Test equality with non-string.""" + if PV == "2": + with pytest.raises(TypeError): + assert Version("1.3.3") == 1.3 + else: + with pytest.raises(TypeError): + assert Version(version="1.3.3") == 1.3 diff --git a/transforms/tabular-merger-tool/.bumpversion.cfg b/transforms/tabular-merger-tool/.bumpversion.cfg index def8097..90eed98 100644 --- a/transforms/tabular-merger-tool/.bumpversion.cfg +++ b/transforms/tabular-merger-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] 
-current_version = 0.1.3-dev2 +current_version = 0.1.3-dev3 commit = True tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? @@ -24,4 +24,6 @@ replace = version = "{new_version}" [bumpversion:file:VERSION] -[bumpversion:file:src/polus/images/transforms/tabular/tabular_merger/__init__.py] +[bumpversion:file:README.md] + +[bumpversion:file:src/polus/tabular/transforms/tabular_merger/__init__.py] diff --git a/transforms/tabular-merger-tool/Dockerfile b/transforms/tabular-merger-tool/Dockerfile index a851ae2..07c61b6 100755 --- a/transforms/tabular-merger-tool/Dockerfile +++ b/transforms/tabular-merger-tool/Dockerfile @@ -1,4 +1,4 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" @@ -12,10 +12,9 @@ WORKDIR ${EXEC_DIR} COPY pyproject.toml ${EXEC_DIR} COPY VERSION ${EXEC_DIR} COPY README.md ${EXEC_DIR} -RUN pip3 install --index-url https://test.pypi.org/simple/ filepattern==2.2.7 COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.transforms.tabular.tabular_merger"] +ENTRYPOINT ["python3", "-m", "polus.tabular.transforms.tabular_merger"] CMD ["--help"] diff --git a/transforms/tabular-merger-tool/README.md b/transforms/tabular-merger-tool/README.md index 3d42224..bd034f9 100644 --- a/transforms/tabular-merger-tool/README.md +++ b/transforms/tabular-merger-tool/README.md @@ -1,4 +1,4 @@ -# Tabular Merger (v0.1.0) +# Tabular Merger (v0.1.3-dev3) This WIPP plugin merges all tabular files with vaex supported file formats into a combined file using either row or column merging. 
diff --git a/transforms/tabular-merger-tool/VERSION b/transforms/tabular-merger-tool/VERSION index df5163a..e28037e 100644 --- a/transforms/tabular-merger-tool/VERSION +++ b/transforms/tabular-merger-tool/VERSION @@ -1 +1 @@ -0.1.3-dev2 +0.1.3-dev3 diff --git a/transforms/tabular-merger-tool/plugin.json b/transforms/tabular-merger-tool/plugin.json index 60c401c..a665a9a 100644 --- a/transforms/tabular-merger-tool/plugin.json +++ b/transforms/tabular-merger-tool/plugin.json @@ -1,18 +1,18 @@ { "name": "Tabular Merger", - "version": "0.1.3-dev2", + "version": "0.1.3-dev3", "title": "Tabular Merger", "description": "Merge vaex supported tabular file format into a single merged file.", "author": "Nicholas Schaub (nick.schaub@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/PolusAI/polus-plugins", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/tabular-merger-tool:0.1.3-dev2", + "containerId": "polusai/tabular-merger-tool:0.1.3-dev3", "baseCommand": [ "python3", "-m", - "polus.images.transforms.tabular.tabular_merger" + "polus.tabular.transforms.tabular_merger" ], "inputs": [ { @@ -111,4 +111,4 @@ "description": "Column name use to merge files" } ] -} +} \ No newline at end of file diff --git a/transforms/tabular-merger-tool/pyproject.toml b/transforms/tabular-merger-tool/pyproject.toml index 34beb73..9d3ef0f 100644 --- a/transforms/tabular-merger-tool/pyproject.toml +++ b/transforms/tabular-merger-tool/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "polus-images-transforms-tabular-tabular-merger" -version = "0.1.3-dev2" +name = "polus-tabular-transforms-tabular-merger" +version = "0.1.3-dev3" description = "Merge vaex supported tabular file format into a single merged file." 
authors = [ "Nick Schaub ", @@ -11,14 +11,12 @@ packages = [{include = "polus", from = "src"}] [tool.poetry.dependencies] python = ">=3.9" -filepattern = "^2.0.0" typer = "^0.7.0" blake3 = "^0.3.3" llvmlite = "^0.39.1" -fastapi = "^0.92.0" -astropy = "5.2.1" vaex = "^4.17.0" tqdm = "^4.65.0" +filepattern = "^2.0.5" [tool.poetry.group.dev.dependencies] diff --git a/transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__init__.py b/transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__init__.py similarity index 65% rename from transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__init__.py rename to transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__init__.py index 0a87e66..77291e3 100644 --- a/transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__init__.py +++ b/transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__init__.py @@ -1,4 +1,4 @@ """Tabular Merger.""" -__version__ = "0.1.3-dev2" +__version__ = "0.1.3-dev3" from . 
import tabular_merger diff --git a/transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__main__.py b/transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__main__.py similarity index 95% rename from transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__main__.py rename to transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__main__.py index 648ad41..55bdbdf 100644 --- a/transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/__main__.py +++ b/transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/__main__.py @@ -9,7 +9,7 @@ import filepattern as fp import typer -from polus.images.transforms.tabular.tabular_merger import tabular_merger as tm +from polus.tabular.transforms.tabular_merger import tabular_merger as tm app = typer.Typer() @@ -18,7 +18,7 @@ format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", datefmt="%d-%b-%y %H:%M:%S", ) -logger = logging.getLogger("polus.images.transforms.tabular_merger") +logger = logging.getLogger("polus.tabular.transforms.tabular_merger") logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO)) POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".arrow") diff --git a/transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/tabular_merger.py b/transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/tabular_merger.py similarity index 100% rename from transforms/tabular-merger-tool/src/polus/images/transforms/tabular/tabular_merger/tabular_merger.py rename to transforms/tabular-merger-tool/src/polus/tabular/transforms/tabular_merger/tabular_merger.py diff --git a/transforms/tabular-merger-tool/tests/test_main.py b/transforms/tabular-merger-tool/tests/test_main.py index 90c9f58..bf05f04 100644 --- a/transforms/tabular-merger-tool/tests/test_main.py +++ b/transforms/tabular-merger-tool/tests/test_main.py @@ -8,7 +8,7 @@ import pandas 
as pd import pytest import vaex -from polus.images.transforms.tabular.tabular_merger import tabular_merger as tm +from polus.tabular.transforms.tabular_merger import tabular_merger as tm class Generatedata: diff --git a/transforms/tabular-thresholding-tool/.bumpversion.cfg b/transforms/tabular-thresholding-tool/.bumpversion.cfg index 695dc97..dffe7ad 100644 --- a/transforms/tabular-thresholding-tool/.bumpversion.cfg +++ b/transforms/tabular-thresholding-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.6-dev0 +current_version = 0.1.6-dev1 commit = True tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? @@ -22,6 +22,12 @@ replace = version = "{new_version}" [bumpversion:file:plugin.json] +[bumpversion:file:README.md] + +[bumpversion:file:ict.yaml] + +[bumpversion:file:tabular-thresholding-plugin.cwl] + [bumpversion:file:VERSION] -[bumpversion:file:src/polus/images/transforms/tabular/tabular_thresholding/__init__.py] +[bumpversion:file:src/polus/tabular/transforms/tabular_thresholding/__init__.py] diff --git a/transforms/tabular-thresholding-tool/Dockerfile b/transforms/tabular-thresholding-tool/Dockerfile index 9943e2e..e17cf7e 100644 --- a/transforms/tabular-thresholding-tool/Dockerfile +++ b/transforms/tabular-thresholding-tool/Dockerfile @@ -1,4 +1,4 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio ENV EXEC_DIR="/opt/executables" @@ -16,5 +16,5 @@ COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.transforms.tabular.tabular_thresholding"] +ENTRYPOINT ["python3", "-m", "polus.tabular.transforms.tabular_thresholding"] CMD ["--help"] diff --git a/transforms/tabular-thresholding-tool/README.md b/transforms/tabular-thresholding-tool/README.md index f79831f..6f7f169 100644 --- a/transforms/tabular-thresholding-tool/README.md +++ b/transforms/tabular-thresholding-tool/README.md @@ -1,4 +1,4 @@ -# Tabular Thresholding 
Plugin (v0.1.3) +# Tabular Thresholding Plugin (v0.1.6-dev1) This plugin uses three [threshold methods](https://github.com/nishaq503/thresholding.git) to compute threshold values on a user-defined variable and then determines if each label (ROI) is above or below the calculated threshold value. A new feature column will be computed for selected threshold method with the values in binary format (0, 1) \ *0* `negative or below threshold`\ *1* `positive or above threshold` diff --git a/transforms/tabular-thresholding-tool/VERSION b/transforms/tabular-thresholding-tool/VERSION index 9518919..aeb7350 100644 --- a/transforms/tabular-thresholding-tool/VERSION +++ b/transforms/tabular-thresholding-tool/VERSION @@ -1 +1 @@ -0.1.6-dev0 +0.1.6-dev1 diff --git a/transforms/tabular-thresholding-tool/ict.yaml b/transforms/tabular-thresholding-tool/ict.yaml index 4c3dd58..1b6445d 100644 --- a/transforms/tabular-thresholding-tool/ict.yaml +++ b/transforms/tabular-thresholding-tool/ict.yaml @@ -2,10 +2,10 @@ author: - Hamdah Shafqat - Najib Ishaq contact: hamdahshafqat.abbasi@nih.gov -container: polusai/tabular-thresholding-tool:0.1.6-dev0 +container: polusai/tabular-thresholding-tool:0.1.6-dev1 description: This plugin computes thresholds using three methods and apply thresholds on each labelled data to produce binary outputs -entrypoint: python3 -m polus.images.transforms.tabular.tabular_thresholding +entrypoint: python3 -m polus.tabular.transforms.tabular_thresholding inputs: - description: Directory containing tabular data format: @@ -77,7 +77,7 @@ outputs: name: outDir required: true type: path -repository: https://github.com/PolusAI/polus-plugins +repository: https://github.com/PolusAI/tabular-tools specVersion: 1.0.0 title: tabular-thresholding-plugin ui: @@ -143,4 +143,4 @@ ui: key: inputs.outFormat title: outFormat type: select -version: 0.1.6-dev0 +version: 0.1.6-dev1 diff --git a/transforms/tabular-thresholding-tool/plugin.json 
b/transforms/tabular-thresholding-tool/plugin.json index d1abdff..c564a66 100644 --- a/transforms/tabular-thresholding-tool/plugin.json +++ b/transforms/tabular-thresholding-tool/plugin.json @@ -1,18 +1,18 @@ { "name": "tabular-thresholding-plugin", - "version": "0.1.6-dev0", + "version": "0.1.6-dev1", "title": "tabular-thresholding-plugin", "description": "This plugin computes thresholds using three methods and apply thresholds on each labelled data to produce binary outputs", "author": "Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov), Najib Ishaq (najib.ishaq@nih.gov)", "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/PolusAI/polus-plugins", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/tabular-thresholding-tool:0.1.6-dev0", + "containerId": "polusai/tabular-thresholding-tool:0.1.6-dev1", "baseCommand": [ "python3", "-m", - "polus.images.transforms.tabular.tabular_thresholding" + "polus.tabular.transforms.tabular_thresholding" ], "inputs": [ { @@ -174,4 +174,4 @@ "default": ".arrow" } ] -} +} \ No newline at end of file diff --git a/transforms/tabular-thresholding-tool/pyproject.toml b/transforms/tabular-thresholding-tool/pyproject.toml index e14b17c..56b5d5f 100644 --- a/transforms/tabular-thresholding-tool/pyproject.toml +++ b/transforms/tabular-thresholding-tool/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "polus-images-transforms-tabular-tabular-thresholding" -version = "0.1.6-dev0" +name = "polus-tabular-transforms-tabular-thresholding" +version = "0.1.6-dev1" description = "This plugin computes thresholds using three methods and apply thresholds on each labelled data to produce binary outputs." 
authors = [ "Hamdah Shafqat Abbasi ", @@ -14,10 +14,6 @@ python = ">=3.9" filepattern = "^2.0.4" typer = "^0.7.0" tqdm = "^4.64.1" -blake3 = "^0.3.3" -llvmlite = "^0.39.1" -fastapi = "^0.92.0" -astropy = "5.2.1" vaex = "^4.17.0" diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__init__.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__init__.py similarity index 70% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__init__.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__init__.py index cc71cde..e9f89c8 100644 --- a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__init__.py +++ b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__init__.py @@ -1,4 +1,4 @@ """Tabular Thresholding.""" -__version__ = "0.1.6-dev0" +__version__ = "0.1.6-dev1" from . 
import tabular_thresholding diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__main__.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__main__.py similarity index 97% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__main__.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__main__.py index c88c2aa..f6a00bb 100644 --- a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/__main__.py +++ b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/__main__.py @@ -12,7 +12,7 @@ import filepattern as fp import typer -from polus.images.transforms.tabular.tabular_thresholding import ( +from polus.tabular.transforms.tabular_thresholding import ( tabular_thresholding as tt, ) @@ -22,7 +22,7 @@ datefmt="%d-%b-%y %H:%M:%S", level=logging.INFO, ) -logger = logging.getLogger("polus.images.transforms.tabular.tabular_thresholding") +logger = logging.getLogger("polus.tabular.tabular_thresholding") logger.setLevel(os.environ.get("POLUS_LOG", logging.INFO)) app = typer.Typer() diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/tabular_thresholding.py similarity index 98% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/tabular_thresholding.py index d00ec2b..0033cf2 100644 --- a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/tabular_thresholding.py +++ 
b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/tabular_thresholding.py @@ -104,7 +104,7 @@ def thresholding_func( else: pos_controls = df[df[pos_control] == 1][var_name].values - neg_controls = df[df[pos_control] == 1][var_name].values + neg_controls = df[df[neg_control] == 1][var_name].values if threshold_type == "fpr": print(threshold_type) diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/__init__.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/__init__.py similarity index 100% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/__init__.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/__init__.py diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/custom_fpr.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/custom_fpr.py similarity index 100% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/custom_fpr.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/custom_fpr.py diff --git a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/n_sigma.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/n_sigma.py similarity index 100% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/n_sigma.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/n_sigma.py diff --git 
a/transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/otsu.py b/transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/otsu.py similarity index 100% rename from transforms/tabular-thresholding-tool/src/polus/images/transforms/tabular/tabular_thresholding/thresholding/otsu.py rename to transforms/tabular-thresholding-tool/src/polus/tabular/transforms/tabular_thresholding/thresholding/otsu.py diff --git a/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl b/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl index 012a87c..fecde01 100644 --- a/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl +++ b/transforms/tabular-thresholding-tool/tabular-thresholding-plugin.cwl @@ -52,7 +52,7 @@ outputs: type: Directory requirements: DockerRequirement: - dockerPull: polusai/tabular-thresholding-tool:0.1.6-dev0 + dockerPull: polusai/tabular-thresholding-tool:0.1.6-dev1 InitialWorkDirRequirement: listing: - entry: $(inputs.outDir) diff --git a/transforms/tabular-thresholding-tool/tests/test_main.py b/transforms/tabular-thresholding-tool/tests/test_main.py index 9b5f859..b2a543f 100644 --- a/transforms/tabular-thresholding-tool/tests/test_main.py +++ b/transforms/tabular-thresholding-tool/tests/test_main.py @@ -11,7 +11,7 @@ import pandas as pd import pytest import vaex -from polus.images.transforms.tabular.tabular_thresholding import ( +from polus.tabular.transforms.tabular_thresholding import ( tabular_thresholding as tt, ) diff --git a/utils/polus-python-template/.bumpversion.cfg b/utils/polus-python-template/.bumpversion.cfg index cdd5e56..50aa146 100644 --- a/utils/polus-python-template/.bumpversion.cfg +++ b/utils/polus-python-template/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.1.0-dev0 +current_version = 1.1.0-dev1 commit = False tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
diff --git a/utils/polus-python-template/CHANGELOG.md b/utils/polus-python-template/CHANGELOG.md index 489dc11..0d93e71 100644 --- a/utils/polus-python-template/CHANGELOG.md +++ b/utils/polus-python-template/CHANGELOG.md @@ -7,3 +7,8 @@ # 1.1.0 * Generate plugins following updated [standard guidelines](https://labshare.atlassian.net/wiki/spaces/WIPP/pages/3275980801/Python+Plugin+Standards) + +# 1.1.0-dev1 + +* Updated this package for tabular-tools repo + diff --git a/utils/polus-python-template/README.md b/utils/polus-python-template/README.md index 0218683..4c8d0e5 100644 --- a/utils/polus-python-template/README.md +++ b/utils/polus-python-template/README.md @@ -1,20 +1,20 @@ -# WIPP Plugin Cookie Cutter (for Python) (1.1.0-dev0) +# WIPP Plugin Cookie Cutter (for Python) (v1.1.0-dev1) This repository is a cookie cutter template that creates the basic scaffold structure of a polus plugin and add it to the polus plugins directory structure. ## How to use -1. Clone `polus-plugins` and change to the polus-plugins directory +1. Clone `tabular-tools` and change to the tabular-tools directory 2. `cd /utils/polus-python-template/` 3. (optional) Install poetry if not available. 4. (optional) Create a dedicated environment with conda or venv. 5. Install the dependencies: `poetry install` 6. Ignore changes to `cookiecutter.json` using: `git update-index --assume-unchanged cookiecutter.json` -7. Modify `cookiecutter.json` to include author and plugin information.`plugin_package` should always start with `polus.plugins`. +7. Modify `cookiecutter.json` to include author and plugin information. `plugin_package` should always start with `polus.tabular`. ** NOTE: ** Do not edit values in brackets ({}) as they are edited by cookiecutter directly. Those are automatically generated from the previous entries. If your plugin is called "Awesome Function", then the plugin folder and docker container will have the name `awesome-function-plugin`. -8.
Create your plugin skeleton: ` python -m cookiecutter . --no-input` +8. Create your plugin skeleton: `python -m cookiecutter . --no-input` ## Plugin Standard @@ -40,14 +40,14 @@ To install the package : The skeleton code can be run this way : From the plugin's top directory (with the default values): -`python -m polus.plugins1.package1.package2.awesome_function -i /tmp/inp -o /tmp/out` +`python -m polus.tabular.package1.package2.awesome_function -i /tmp/inp -o /tmp/out` This should print some logs with the provided inputs and outputs and return. ## Running tests Plugin's developer should use `pytest`. Some simple tests have been added to the template as examples. -Before submitting a PR to `polus-plugins`, other unit tests should be created and added to the `tests` +Before submitting a PR to `tabular-tools`, other unit tests should be created and added to the `tests` directory. To run tests : diff --git a/utils/polus-python-template/hooks/post_gen_project.py b/utils/polus-python-template/hooks/post_gen_project.py index f3f0ee4..745c331 100644 --- a/utils/polus-python-template/hooks/post_gen_project.py +++ b/utils/polus-python-template/hooks/post_gen_project.py @@ -17,9 +17,9 @@ def create_repository_directories(source_dir): """ Buid the correct directories inside polus-plugins. The directory structure must conforms to the plugin's spec : - dash-separated word in identifier. 
- - folder hierarchy matches package namespace minus "polus.plugins" + - folder hierarchy matches package namespace minus "polus.tabular" - plugin's folder name reflects the plugin package name but ends with "-plugin" - Ex: polus.plugins.package1.package2.awesome_function becomes + Ex: polus.tabular.package1.package2.awesome_function becomes package1/package2/awesome-function-plugin """ @@ -38,8 +38,8 @@ def create_repository_directories(source_dir): # make sure we replace underscores new_dirs = "{{cookiecutter.plugin_package}}".replace("_", "-") new_dirs = new_dirs.split(".") - # remove polus.plugins so we only keep intermediary directories - # Ex: polus.plugins.package1.package2.awesome_function creates + # remove polus.tabular so we only keep intermediary directories + # Ex: polus.tabular.package1.package2.awesome_function creates # package1/package2/ new_dirs = new_dirs[2:-1] if len(new_dirs) != 0: diff --git a/utils/polus-python-template/hooks/pre_gen_project.py b/utils/polus-python-template/hooks/pre_gen_project.py index 802f5d1..894625a 100644 --- a/utils/polus-python-template/hooks/pre_gen_project.py +++ b/utils/polus-python-template/hooks/pre_gen_project.py @@ -22,12 +22,12 @@ ## TODO check valid plugin_package = "{{ cookiecutter.plugin_package }}" -if not plugin_package.startswith("polus.plugins."): +if not plugin_package.startswith("polus.tabular."): raise ValueError( - f"plugin package must be a child of polus.plugins." - + f"plugin_package must start with 'polus.plugins'. Got : {plugin_package}" + f"plugin package must be a child of polus.tabular." + + f"plugin_package must start with 'polus.tabular'. Got : {plugin_package}" ) -if plugin_package.endswith("_plugin"): +if plugin_package.endswith("_tool"): raise ValueError( f"plugin_package must not ends with _plugin. Got : {plugin_package}" ) @@ -41,10 +41,10 @@ assert not ("_" in project_name) and not ("." 
in project_name) plugin_slug = "{{ cookiecutter.plugin_slug }}" -assert plugin_slug.startswith("polus-") and plugin_slug.endswith("-plugin") +assert plugin_slug.startswith("polus-") and plugin_slug.endswith("-tool") container_name = "{{ cookiecutter.container_name }}" -assert container_name.endswith("-plugin") +assert container_name.endswith("-tool") container_id = "{{ cookiecutter.container_id }}" assert container_id.startswith("polusai/") diff --git a/utils/polus-python-template/pyproject.toml b/utils/polus-python-template/pyproject.toml index 0052037..d6adbb2 100644 --- a/utils/polus-python-template/pyproject.toml +++ b/utils/polus-python-template/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "polus-python-template" -version = "1.1.0-dev0" +version = "1.1.0-dev1" description = "" authors = ["Nick Schaub ", "Antoine Gerardin "] readme = "README.md" diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile b/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile index dc889b0..b615060 100644 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile @@ -1,4 +1,4 @@ -FROM polusai/bfio:2.1.9 +FROM polusai/bfio:2.3.6 # environment variables defined in polusai/bfio # ENV EXEC_DIR="/opt/executables" diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json b/utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json index 69714cb..53ba85e 100644 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json @@ -1,63 +1,67 @@ { - "name": "{{cookiecutter.plugin_name}}", - "version": "{{cookiecutter.plugin_version}}", - "title": "{{cookiecutter.plugin_name}}", - "description": "{{cookiecutter.plugin_description}}", - "author": "{{cookiecutter.author}} ({{cookiecutter.author_email}})", - "institution": 
"National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/labshare/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "", - "containerId": "{{cookiecutter.container_id}}:{{cookiecutter.container_version}}", - "baseCommand": ["python3", "-m", "{{cookiecutter.plugin_package}}"], - "inputs": { - "inpDir": { - "type": "collection", - "title": "Input collection", - "description": "Input image collection to be processed by this plugin.", - "required": "True" - }, - "filePattern": { - "type": "string", - "title": "Filename pattern", - "description": "Filename pattern used to separate data.", - "required": "False", - "default": ".*" - }, - "preview": { - "type": "boolean", - "title": "Preview", - "description": "Generate an output preview.", - "required": "False", - "default": "False" + "name": "{{cookiecutter.plugin_name}}", + "version": "{{cookiecutter.plugin_version}}", + "title": "{{cookiecutter.plugin_name}}", + "description": "{{cookiecutter.plugin_description}}", + "author": "{{cookiecutter.author}} ({{cookiecutter.author_email}})", + "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", + "repository": "https://github.com/PolusAI/tabular-tools", + "website": "https://ncats.nih.gov/preclinical/core/informatics", + "citation": "", + "containerId": "{{cookiecutter.container_id}}:{{cookiecutter.container_version}}", + "baseCommand": [ + "python3", + "-m", + "{{cookiecutter.plugin_package}}" + ], + "inputs": { + "inpDir": { + "type": "collection", + "title": "Input collection", + "description": "Input image collection to be processed by this plugin.", + "required": "True" + }, + "filePattern": { + "type": "string", + "title": "Filename pattern", + "description": "Filename pattern used to separate data.", + "required": "False", + "default": ".*" + }, + "preview": { + "type": "boolean", + "title": "Preview", + 
"description": "Generate an output preview.", + "required": "False", + "default": "False" } }, "outputs": { - "outDir": { - "type": "collection", - "description": "Output collection." - } + "outDir": { + "type": "collection", + "description": "Output collection." + } }, - "ui": { - "inpDir": { - "type": "collection", - "title": "Input collection", - "description": "Input image collection to be processed by this plugin.", - "required": "True" - }, - "filePattern": { - "type": "string", - "title": "Filename pattern", - "description": "Filename pattern used to separate data.", - "required": "False", - "default": "False" - }, - "preview": { - "type": "boolean", - "title": "Filename pattern", - "description": "Generate an output preview.", - "required": "False", - "default": "False" - } + "ui": { + "inpDir": { + "type": "collection", + "title": "Input collection", + "description": "Input image collection to be processed by this plugin.", + "required": "True" + }, + "filePattern": { + "type": "string", + "title": "Filename pattern", + "description": "Filename pattern used to separate data.", + "required": "False", + "default": "False" + }, + "preview": { + "type": "boolean", + "title": "Filename pattern", + "description": "Generate an output preview.", + "required": "False", + "default": "False" + } } } \ No newline at end of file diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py b/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py index 62f32f9..2487bdf 100644 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py +++ b/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py @@ -16,7 +16,7 @@ datefmt="%d-%b-%y %H:%M:%S", ) POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "INFO")) -logger = 
logging.getLogger("polus.plugins.package1.package2.awesome_function") +logger = logging.getLogger("polus.tabular.package1.package2.awesome_function") logger.setLevel(POLUS_LOG) POLUS_IMG_EXT = environ.get("POLUS_IMG_EXT", ".ome.tif") diff --git a/utils/polus-stitching-vector-merger-plugin/ict.yaml b/utils/polus-stitching-vector-merger-plugin/ict.yaml index 0c9b1df..09b4e47 100644 --- a/utils/polus-stitching-vector-merger-plugin/ict.yaml +++ b/utils/polus-stitching-vector-merger-plugin/ict.yaml @@ -43,7 +43,7 @@ outputs: name: outDir required: true type: path -repository: https://github.com/labshare/polus-plugins +repository: https://github.com/PolusAI/tabular-tools specVersion: 1.0.0 title: Merge StitchingVector ui: diff --git a/utils/rxiv-download-tool/.bumpversion.cfg b/utils/rxiv-download-tool/.bumpversion.cfg index baf107b..8af24ea 100644 --- a/utils/rxiv-download-tool/.bumpversion.cfg +++ b/utils/rxiv-download-tool/.bumpversion.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 0.1.0-dev0 +current_version = 0.1.0-dev1 commit = False tag = False parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))?
@@ -24,6 +24,10 @@ replace = version = "{new_version}" [bumpversion:file:README.md] +[bumpversion:file:ict.yaml] + +[bumpversion:file:downloadrxivtextdata.cwl] + [bumpversion:file:plugin.json] -[bumpversion:file:src/polus/images/utils/rxiv_download/__init__.py] +[bumpversion:file:src/polus/tabular/utils/rxiv_download/__init__.py] diff --git a/utils/rxiv-download-tool/Dockerfile b/utils/rxiv-download-tool/Dockerfile index 13e8242..73d25ec 100644 --- a/utils/rxiv-download-tool/Dockerfile +++ b/utils/rxiv-download-tool/Dockerfile @@ -16,5 +16,5 @@ COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache-dir -ENTRYPOINT ["python3", "-m", "polus.images.utils.rxiv_download"] +ENTRYPOINT ["python3", "-m", "polus.tabular.utils.rxiv_download"] CMD ["--help"] diff --git a/utils/rxiv-download-tool/README.md b/utils/rxiv-download-tool/README.md index 5c90866..fcb0d7b 100644 --- a/utils/rxiv-download-tool/README.md +++ b/utils/rxiv-download-tool/README.md @@ -1,4 +1,4 @@ -# Rxiv Download (v0.1.0-dev0) +# Rxiv Download (v0.1.0-dev1) This plugin allows to download data from open access archives. Currently this plugin supports downloading data from [arxiv](https://www.openarchives.org/). Later additional support for other archives will be added. 
@@ -27,4 +27,4 @@ This plugin takes 2 input arguments and ## Sample docker command: -```docker run -v /home/ec2-user/data/:/home/ec2-user/data/ polusai/rxiv-download-tool:0.1.0-dev0 --rxiv="arXiv" --start='2023-2-16' --outDir=/home/ec2-user/data/output``` +```docker run -v /home/ec2-user/data/:/home/ec2-user/data/ polusai/rxiv-download-tool:0.1.0-dev1 --rxiv="arXiv" --start='2023-2-16' --outDir=/home/ec2-user/data/output``` diff --git a/utils/rxiv-download-tool/VERSION b/utils/rxiv-download-tool/VERSION index 206c085..6b1a238 100644 --- a/utils/rxiv-download-tool/VERSION +++ b/utils/rxiv-download-tool/VERSION @@ -1 +1 @@ -0.1.0-dev0 +0.1.0-dev1 diff --git a/utils/rxiv-download-tool/downloadrxivtextdata.cwl b/utils/rxiv-download-tool/downloadrxivtextdata.cwl index 694877e..201e5a9 100644 --- a/utils/rxiv-download-tool/downloadrxivtextdata.cwl +++ b/utils/rxiv-download-tool/downloadrxivtextdata.cwl @@ -24,7 +24,7 @@ outputs: type: Directory requirements: DockerRequirement: - dockerPull: polusai/rxiv-download-tool:0.1.0-dev0 + dockerPull: polusai/rxiv-download-tool:0.1.0-dev1 InitialWorkDirRequirement: listing: - entry: $(inputs.outDir) diff --git a/utils/rxiv-download-tool/ict.yaml b/utils/rxiv-download-tool/ict.yaml index 2f8bb5b..6950894 100644 --- a/utils/rxiv-download-tool/ict.yaml +++ b/utils/rxiv-download-tool/ict.yaml @@ -2,9 +2,9 @@ author: - Nick Schaub - Hamdah Shafqat contact: nick.schaub@nih.gov -container: polusai/rxiv-download-tool:0.1.0-dev0 +container: polusai/rxiv-download-tool:0.1.0-dev1 description: This plugin allows to download data from Rxiv website. -entrypoint: python3 -m polus.images.utils.rxiv_download +entrypoint: python3 -m polus.tabular.utils.rxiv_download inputs: - description: Pull records from open access archives. 
format: @@ -32,7 +32,7 @@ outputs: name: outDir required: true type: path -repository: https://github.com/PolusAI/image-tools +repository: https://github.com/PolusAI/tabular-tools specVersion: 1.0.0 title: Download Rxiv text data ui: @@ -48,4 +48,4 @@ ui: key: inputs.preview title: Preview example output of this plugin type: checkbox -version: 0.1.0-dev0 +version: 0.1.0-dev1 diff --git a/utils/rxiv-download-tool/plugin.json b/utils/rxiv-download-tool/plugin.json index 1954acc..8f08081 100644 --- a/utils/rxiv-download-tool/plugin.json +++ b/utils/rxiv-download-tool/plugin.json @@ -1,18 +1,18 @@ { "name": "Rxiv-Download", - "version": "0.1.0-dev0", + "version": "0.1.0-dev1", "title": "Download Rxiv text data", "description": "This plugin allows to download data from Rxiv website.", "author": "Nick Schaub (nick.schaub@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/PolusAI/image-tools", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", - "containerId": "polusai/rxiv-download-tool:0.1.0-dev0", + "containerId": "polusai/rxiv-download-tool:0.1.0-dev1", "baseCommand": [ "python3", "-m", - "polus.images.utils.rxiv_download" + "polus.tabular.utils.rxiv_download" ], "inputs": [ { @@ -67,4 +67,4 @@ "required": "False" } ] -} +} \ No newline at end of file diff --git a/utils/rxiv-download-tool/pyproject.toml b/utils/rxiv-download-tool/pyproject.toml index 5920838..c9dfcaf 100644 --- a/utils/rxiv-download-tool/pyproject.toml +++ b/utils/rxiv-download-tool/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] -name = "polus-images-utils-rxiv-download" -version = "0.1.0-dev0" +name = "polus-tabular-utils-rxiv-download" +version = "0.1.0-dev1" description = "Fetch text data from rxiv" authors = [ "Nick Schaub ", diff --git 
a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__init__.py b/utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__init__.py similarity index 51% rename from utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__init__.py rename to utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__init__.py index cc4f2cc..9414814 100644 --- a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__init__.py +++ b/utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__init__.py @@ -1,3 +1,3 @@ """Rxiv Download Plugin.""" -__version__ = "0.1.0-dev0" +__version__ = "0.1.0-dev1" diff --git a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__main__.py b/utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__main__.py similarity index 88% rename from utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__main__.py rename to utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__main__.py index b9c4576..990992f 100644 --- a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/__main__.py +++ b/utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/__main__.py @@ -6,8 +6,8 @@ from typing import Optional import typer -from polus.images.utils.rxiv_download.fetch import ArxivDownload -from polus.images.utils.rxiv_download.fetch import generate_preview +from polus.tabular.utils.rxiv_download.fetch import ArxivDownload +from polus.tabular.utils.rxiv_download.fetch import generate_preview app = typer.Typer() @@ -16,7 +16,7 @@ format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", datefmt="%d-%b-%y %H:%M:%S", ) -logger = logging.getLogger("polus.plugins.utils.rxiv_download") +logger = logging.getLogger("polus.tabular.utils.rxiv_download") @app.command() diff --git a/utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py b/utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/fetch.py similarity index 100% rename from 
utils/rxiv-download-tool/src/polus/images/utils/rxiv_download/fetch.py rename to utils/rxiv-download-tool/src/polus/tabular/utils/rxiv_download/fetch.py diff --git a/utils/rxiv-download-tool/tests/test_cli.py b/utils/rxiv-download-tool/tests/test_cli.py index f967909..807ea43 100644 --- a/utils/rxiv-download-tool/tests/test_cli.py +++ b/utils/rxiv-download-tool/tests/test_cli.py @@ -3,7 +3,7 @@ from typer.testing import CliRunner from pathlib import Path import pytest -from polus.images.utils.rxiv_download.__main__ import app +from polus.tabular.utils.rxiv_download.__main__ import app from .conftest import clean_directories import time diff --git a/utils/rxiv-download-tool/tests/test_fetch.py b/utils/rxiv-download-tool/tests/test_fetch.py index d2130ca..92a17cd 100644 --- a/utils/rxiv-download-tool/tests/test_fetch.py +++ b/utils/rxiv-download-tool/tests/test_fetch.py @@ -1,7 +1,7 @@ """Test Command line Tool.""" from pathlib import Path -import polus.images.utils.rxiv_download.fetch as ft +import polus.tabular.utils.rxiv_download.fetch as ft from .conftest import clean_directories import time import pytest diff --git a/visualization/polus-graph-pyramid-builder-plugin/ict.yaml b/visualization/polus-graph-pyramid-builder-plugin/ict.yaml index bec2443..34f01fb 100644 --- a/visualization/polus-graph-pyramid-builder-plugin/ict.yaml +++ b/visualization/polus-graph-pyramid-builder-plugin/ict.yaml @@ -33,7 +33,7 @@ outputs: name: outDir required: true type: path -repository: https://github.com/labshare/polus-plugins +repository: https://github.com/PolusAI/tabular-tools specVersion: 1.0.0 title: Graph Pyramid Building ui: diff --git a/visualization/polus-graph-pyramid-builder-plugin/plugin.json b/visualization/polus-graph-pyramid-builder-plugin/plugin.json index c25cfba..1864c1b 100644 --- a/visualization/polus-graph-pyramid-builder-plugin/plugin.json +++ b/visualization/polus-graph-pyramid-builder-plugin/plugin.json @@ -5,7 +5,7 @@ "description": "Generates heatmaps from 
the data in a csv and builds a DeepZoom pyramid for visualization.", "author": "Madhuri Vihani (Madhuri.Vihani@nih.gov), Nick Schaub (Nick.Schaub@nih.gov)", "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/labshare/polus-plugins", + "repository": "https://github.com/PolusAI/tabular-tools", "website": "https://ncats.nih.gov/preclinical/core/informatics", "citation": "", "containerId": "polusai/graph-pyramid-builder-plugin:1.3.8", @@ -69,4 +69,4 @@ "description": "Create logarithmically scaled, linearly scaled, or both graphs" } ] -} +} \ No newline at end of file diff --git a/visualization/tabular-to-microjson-tool/README.md b/visualization/tabular-to-microjson-tool/README.md deleted file mode 100644 index 3d1b5a7..0000000 --- a/visualization/tabular-to-microjson-tool/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Tabular To Microjson(v0.1.1) - -This plugin uses [MICROJSON](https://github.com/bengtl/microjson/tree/dev) python library to generate JSON from tabular data which can be used in -[RENDER UI](https://render.ci.ncats.io/?imageUrl=https://files.scb-ncats.io/pyramids/Idr0033/precompute/41744/x(00-15)_y(01-24)_p0(1-9)_c(1-5)/) -application for visualization of microscopy images. - -This plugin allows to calculate geometry coordinates i-e `Polygon` and `Point` using image positions from corresponding stitching vector. -Note: The filenames of tabular and stitching vector should be same -`groupBy` is used when there are more than one image in each well then pass a `variable` used in `stitchPattern` to group filenames in a stitching vector to compute geometry coordinates. - -Note: Currently this plugin supports two geometry types `Polygon` and `Point`.A future work requires additional support of more geometry types in this plugin. - -Currently this plugins handles only three file formats supported by vaex. -1. csv -2. arrow -3. 
feather - - -Contact [Hamdah Shafqat Abbasi](mailto:hamdahshafqat.abbasi@nih.gov) for more information. -For more information on WIPP, visit the -[official WIPP page](https://isg.nist.gov/deepzoomweb/software/wipp). - -## Building - -To build the Docker image for the conversion plugin, run -`./build-docker.sh`. - -## Install WIPP Plugin - -If WIPP is running, navigate to the plugins page and add a new plugin. Paste the -contents of `plugin.json` into the pop-up window and submit. - -## Options - -This plugin can take seven input arguments and one output argument: - -| Name | Description | I/O | Type | -|-------------------|-------------------------------------------------------|--------|--------------| -| `inpDir` | Input directory | Input | string | -| `stitchDir` | Directory containing stitching vectors | Input | string | -| `filePattern` | Pattern to parse tabular filenames | Input | string | -| `stitchPattern` | Pattern to parse filenames in stitching vector | Input | string | -| `groupBy` | Variable to group filenames in stitching vector | Input | string | -| `geometryType` | Geometry type (Polygon, Point) | Input | string | -| `outDir` | Output directory for overlays | Output | string | -| `preview` | Generate a JSON file with outputs | Output | JSON | - -## Run the plugin - -### Run the Docker Container - -```bash -docker run -v /data:/data polusai/tabular-to-microjson-plugin:0.1.1 \ - --inpDir /data/input \ - --stitchDir /data/stitchvector \ - --filePattern ".*.csv" \ - --stitchPattern "x{x:dd}_y{y:dd}_c{c:d}.ome.tif" \ - --groupBy None \ - --geometryType "Polygon" \ - --outDir /data/output \ - --preview -``` diff --git a/visualization/tabular-to-microjson-tool/VERSION b/visualization/tabular-to-microjson-tool/VERSION deleted file mode 100644 index 9d8d2c1..0000000 --- a/visualization/tabular-to-microjson-tool/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.2-dev0 diff --git a/visualization/tabular-to-microjson-tool/build-docker.sh 
b/visualization/tabular-to-microjson-tool/build-docker.sh deleted file mode 100644 index c63f2c2..0000000 --- a/visualization/tabular-to-microjson-tool/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$("] -readme = "README.md" -packages = [{include = "polus", from = "src"}] - -[tool.poetry.dependencies] -python = ">=3.9,<4.0" -typer = "^0.7.0" -filepattern = "^2.0.1" -tqdm = "^4.65.0" -pandas = "^2.0.3" -microjson = "^0.1.9" -vaex = "^4.17.0" -pydantic = "^2.4.2" - - -[tool.poetry.group.dev.dependencies] -bump2version = "^1.0.1" -pre-commit = "^3.1.0" -black = "^23.1.0" -flake8 = "^6.0.0" -mypy = "^1.0.1" -pytest = "^7.2.1" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" diff --git a/visualization/tabular-to-microjson-tool/run-plugin.sh b/visualization/tabular-to-microjson-tool/run-plugin.sh deleted file mode 100644 index d3f055c..0000000 --- a/visualization/tabular-to-microjson-tool/run-plugin.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash - -version=$( None: - """Apply Render Overlay to input tabular data to create microjson overlays.""" - logger.info(f"inpDir = {inp_dir}") - logger.info(f"filePattern = {file_pattern}") - logger.info(f"stitchDir = {stitch_dir}") - logger.info(f"geometryType = {geometry_type}") - logger.info(f"stitchPattern = {stitch_pattern}") - logger.info(f"groupBy = {group_by}") - logger.info(f"outDir = {out_dir}") - - inp_dir = inp_dir.resolve() - out_dir = out_dir.resolve() - - fps = fp.FilePattern(inp_dir, file_pattern) - - files = [file[1][0] for file in fps()] - - with ThreadPoolExecutor(max_workers=num_workers) as executor: - for file in tqdm(files, desc="Creating overlays", total=len(files)): - fname = pathlib.Path(file).stem - stitch_path = stitch_dir.joinpath(f"{fname}.txt") - if geometry_type == "Polygon": - poly = mo.PolygonSpec( - stitch_path=str(stitch_path), - stitch_pattern=stitch_pattern, - group_by=group_by, - ) - else: - poly = mo.PointSpec( - 
stitch_path=str(stitch_path), - stitch_pattern=stitch_pattern, - group_by=group_by, - ) - - micro_model = mo.RenderOverlayModel( - file_path=file, - coordinates=poly.get_coordinates, - geometry_type=geometry_type, - out_dir=out_dir, - ) - executor.submit(micro_model.microjson_overlay) - - if preview: - shutil.copy( - pathlib.Path(__file__) - .parents[5] - .joinpath(f"examples/example_overlay_{geometry_type}.json"), - out_dir, - ) - - -if __name__ == "__main__": - app() diff --git a/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py b/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py deleted file mode 100644 index 6e249c1..0000000 --- a/visualization/tabular-to-microjson-tool/src/polus/images/visualization/tabular_to_microjson/microjson_overlay.py +++ /dev/null @@ -1,413 +0,0 @@ -"""Render Overlay.""" -import ast -import logging -import os -from pathlib import Path -from typing import Any -from typing import Optional -from typing import Union - -import filepattern as fp -import microjson.model as mj -import numpy as np -import pydantic -import vaex -from pydantic import root_validator -from pydantic import validator - -logger = logging.getLogger(__name__) -logger.setLevel(logging.INFO) - -POLUS_TAB_EXT = os.environ.get("POLUS_TAB_EXT", ".csv") -EXT = (".arrow", ".feather") - - -def convert_vaex_dataframe(file_path: Path) -> vaex.dataframe.DataFrame: - """The vaex reading of tabular data with (".csv", ".feather", ".arrow") format. - - Args: - file_path: Path to tabular data. - - Returns: - A vaex dataframe. 
- """ - if file_path.name.endswith(".csv"): - return vaex.read_csv(Path(file_path), convert=True, chunk_size=5_000_000) - if file_path.name.endswith(EXT): - return vaex.open(Path(file_path)) - return None - - -class CustomOverlayModel(pydantic.BaseModel): - """Setting up configuration for pydantic base model.""" - - class Config: - """Model configuration.""" - - extra = "allow" - allow_population_by_field_name = True - - -class Validator(CustomOverlayModel): - """Validate stiching vector path and stiching pattern fields. - - This validates values passed for stitch_path and stitch_pattern attributes. - - Args: - stitch_path: Path to the stitching vector, containing x and y image positions. - stitch_pattern: Pattern to parse image filenames in stitching vector. - - Returns: - Attribute values - - """ - - stitch_path: str - stitch_pattern: str - - @root_validator(pre=True) - def validate_stitch_path(cls, values: dict) -> dict: # noqa: N805 - """Validate stitch path and stitch pattern.""" - stitch_path = values.get("stitch_path") - stitch_pattern = values.get("stitch_pattern") - if stitch_path is not None and not Path(stitch_path).exists(): - msg = "Stitching path does not exists!! Please do check path again" - raise ValueError(msg) - if stitch_path is not None and Path(stitch_path).exists(): - with Path.open(Path(stitch_path)) as f: - line = f.readlines() - if line is None: - msg = ( - "Stitching vector is empty so grid positions cannot be defined" - ) - raise ValueError(msg) - if stitch_path is not None and Path(stitch_path).exists(): - files = fp.FilePattern(stitch_path, stitch_pattern) - if len(files) == 0: - msg = "Define stitch pattern again!!! as it is unable to parse file" - raise ValueError(msg) - - return values - - -class PolygonSpec(Validator): - """Polygon is a two-dimensional planar shape with straight sides. - - This generates rectangular polygon coordinates from (x, y) coordinate positions. 
- - Args: - stitch_path: Path to the stitching vector, containing x and y image positions. - stitch_pattern: Pattern to parse image filenames in stitching vector. - group_by: Variable to group image filenames in stitching vector. - - Returns: - A list of a list of tuples of rectangular polygon coordinates. - - """ - - stitch_path: str - stitch_pattern: str - group_by: Optional[str] = None - - @property - def get_coordinates(self) -> list[Any]: - """Generate rectangular polygon coordinates.""" - files = fp.FilePattern(self.stitch_path, self.stitch_pattern) - self.group_by = None if self.group_by == "None" else self.group_by - - if self.group_by is not None: - var_list = files.get_unique_values() - var_dict = {k: len(v) for k, v in var_list.items() if k == self.group_by} - gp_value = var_dict[self.group_by] - gp_dict = {self.group_by: gp_value} - - coordinates = [] - for i, matching in enumerate(files.get_matching(**gp_dict)): - if i == 0: - cell_width = matching[0]["posX"] - x, y = matching[0]["posX"], matching[0]["posY"] - pos1 = [x, y] - pos2 = [x + cell_width, y] - pos3 = [x + cell_width, y + cell_width] - pos4 = [x, y + cell_width] - pos5 = [x, y] - poly = str([[pos1, pos2, pos3, pos4, pos5]]) - if gp_value: - poly = np.repeat(str(poly), gp_value) - coordinates.append(poly) - coordinates = np.concatenate(coordinates).ravel().tolist() - else: - coordinates = [] - cell_width = list(files())[1][0]["posX"] - for _, file in enumerate(files()): - x, y = file[0]["posX"], file[0]["posY"] - pos1 = [x, y] - pos2 = [x + cell_width, y] - pos3 = [x + cell_width, y + cell_width] - pos4 = [x, y + cell_width] - pos5 = [x, y] - poly = str([[pos1, pos2, pos3, pos4, pos5]]) - coordinates.append(poly) - - mapped_coordinates = [] - for file, cor in zip(files(), coordinates): - filename = str(file[1][0]) - coord_dict = {"file": filename, "coordinates": cor} - mapped_coordinates.append(coord_dict) - - return mapped_coordinates - - -class PointSpec(Validator): - """Polygon is a 
two-dimensional planar shape with straight sides. - - This generates rectangular polygon coordinates from (x, y) coordinate positions. - - Args: - stitch_path: Path to the stitching vector, containing x and y image positions. - stitch_pattern: Pattern to parse image filenames in stitching vector. - group_by: Variable to group image filenames in stitching vector. - - Returns: - A list of tuples of centroids of a rectangular polygon.. - - """ - - stitch_path: str - stitch_pattern: str - group_by: Optional[str] = None - - @property - def get_coordinates(self) -> list[Any]: - """Generate rectangular polygon coordinates.""" - files = fp.FilePattern(self.stitch_path, self.stitch_pattern) - self.group_by = None if self.group_by == "None" else self.group_by - - if self.group_by is not None: - var_list = files.get_unique_values() - var_dict = {k: len(v) for k, v in var_list.items() if k == self.group_by} - gp_value = var_dict[self.group_by] - gp_dict = {self.group_by: gp_value} - - coordinates = [] - for i, matching in enumerate(files.get_matching(**gp_dict)): - if i == 0: - cell_width = matching[0]["posY"] - x, y = matching[0]["posX"], matching[0]["posY"] - x1 = x - y1 = y + cell_width - x2 = x + cell_width - y2 = y - position = ((x1 + x2) / 2, (y1 + y2) / 2) - if gp_value: - poly = np.repeat(str(position), gp_value) - coordinates.append(poly) - coordinates = np.concatenate(coordinates).ravel().tolist() - - else: - coordinates = [] - cell_width = list(files())[1][0]["posX"] - for _, file in enumerate(files()): - x, y = file[0]["posX"], file[0]["posY"] - x1 = x - y1 = y + cell_width - x2 = x + cell_width - y2 = y - position = ((x1 + x2) / 2, (y1 + y2) / 2) - coordinates.append(position) - - mapped_coordinates = [] - for file, cor in zip(files(), coordinates): - filename = str(file[1][0]) - coord_dict = {"file": filename, "coordinates": cor} - mapped_coordinates.append(coord_dict) - - return mapped_coordinates - - -class ValidatedProperties(mj.Properties): - """Properties 
with validation.""" - - @validator("string", pre=True, each_item=True) - def validate_str( - cls, - v: Union[str, None], - ) -> str: # noqa: N805 - """Validate string.""" - if v is None: - return "" - return v - - @validator("numeric", pre=True, each_item=True) - def validate_num( - cls, - v: Union[int, None], - ) -> Union[int, None]: # noqa: N805 - """Validate numeric.""" - if v is None: - return np.nan - return v - - -class RenderOverlayModel(CustomOverlayModel): - """Generate JSON overlays using microjson python package. - - Args: - file_path: Path to input file. - coordinates: List of geometry coordinates. - geometry_type: Type of geometry (Polygon, Points, bbbox). - out_dir: Path to output directory. - """ - - file_path: Path - coordinates: list[Any] - geometry_type: str - out_dir: Path - - @pydantic.validator("file_path", pre=True) - def validate_file_path(cls, value: Path) -> Path: # noqa: N805 - """Validate file path.""" - if not Path(value).exists(): - msg = "File path does not exists!! 
Please do check path again" - raise ValueError(msg) - if ( - Path(value).exists() - and not Path(value).name.startswith(".") - and Path(value).name.endswith(".csv") - ): - data = vaex.read_csv(Path(value)) - if data.shape[0] | data.shape[1] == 0: - msg = "data doesnot exists" - raise ValueError(msg) - - elif ( - Path(value).exists() - and not Path(value).name.startswith(".") - and Path(value).name.endswith(EXT) - ): - data = vaex.open(Path(value)) - if data.shape[0] | data.shape[1] == 0: - msg = "data doesnot exists" - raise ValueError(msg) - - return value - - @property - def microjson_overlay(self) -> None: - """Create microjson overlays in JSON Format.""" - if self.file_path.name.endswith((".csv", ".feather", ".arrow")): - data = convert_vaex_dataframe(self.file_path) - des_columns = [ - feature - for feature in data.get_column_names() - if data.data_type(feature) == str - ] - - int_columns = [ - feature - for feature in data.get_column_names() - if data.data_type(feature) == int or data.data_type(feature) == float - ] - - if len(int_columns) == 0: - msg = "Features with integer datatype do not exist" - raise ValueError(msg) - - if len(des_columns) == 0: - msg = "Descriptive features do not exist" - raise ValueError(msg) - - data["geometry_type"] = np.repeat(self.geometry_type, data.shape[0]) - data["type"] = np.repeat("Feature", data.shape[0]) - - excolumns = ["geometry_type", "type"] - - des_columns = [col for col in des_columns if col not in excolumns] - - features: list[mj.Feature] = [] - - for d, cor in zip(data.iterrows(), self.coordinates): - _, row = d - if row["intensity_image"] == cor["file"]: - desc = [{key: row[key]} for key in des_columns] - nume = [{key: row[key]} for key in int_columns] - - descriptive_dict = {} - for sub_dict in desc: - descriptive_dict.update(sub_dict) - - numeric_dict = {} - for sub_dict in nume: - numeric_dict.update(sub_dict) - - GeometryClass = getattr(mj, row["geometry_type"]) # noqa: N806 - cor_value = 
ast.literal_eval(cor["coordinates"]) - geometry = GeometryClass( - type=row["geometry_type"], - coordinates=cor_value, - ) - - # create a new properties object dynamically - properties = ValidatedProperties( - string=descriptive_dict, - numeric=numeric_dict, - ) - - # Create a new Feature object - feature = mj.MicroFeature( - type=row["type"], - geometry=geometry, - properties=properties, - ) - features.append(feature) - - valrange = [ - {i: {"min": data[i].min(), "max": data[i].max()}} for i in int_columns - ] - valrange_dict = {} - for sub_dict in valrange: - valrange_dict.update(sub_dict) - - # Create a list of descriptive fields - descriptive_fields = des_columns - - # Create a new FeatureCollection object - feature_collection = mj.MicroFeatureCollection( - type="FeatureCollection", - features=features, - value_range=valrange_dict, - descriptive_fields=descriptive_fields, - coordinatesystem={ - "axes": [ - { - "name": "x", - "unit": "micrometer", - "type": "cartesian", - "pixelsPerUnit": 1, - "description": "x-axis", - }, - { - "name": "y", - "unit": "micrometer", - "type": "cartesian", - "pixelsPerUnit": 1, - "description": "y-axis", - }, - ], - "origo": "top-left", - }, - ) - - if len(feature_collection.model_dump_json()) == 0: - msg = "JSON file is empty" - raise ValueError(msg) - if len(feature_collection.model_dump_json()) > 0: - out_name = Path(self.out_dir, f"{self.file_path.stem}_overlay.json") - with Path.open(out_name, "w") as f: - f.write( - feature_collection.model_dump_json( - indent=2, - exclude_unset=True, - ), - ) - logger.info(f"Saving overlay json file: {out_name}") diff --git a/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl b/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl deleted file mode 100644 index 2e2d5c4..0000000 --- a/visualization/tabular-to-microjson-tool/tabulartomicrojson.cwl +++ /dev/null @@ -1,44 +0,0 @@ -class: CommandLineTool -cwlVersion: v1.2 -inputs: - filePattern: - inputBinding: - prefix: 
--filePattern - type: string? - geometryType: - inputBinding: - prefix: --geometryType - type: string? - groupBy: - inputBinding: - prefix: --groupBy - type: string? - inpDir: - inputBinding: - prefix: --inpDir - type: Directory - outDir: - inputBinding: - prefix: --outDir - type: Directory - stitchDir: - inputBinding: - prefix: --stitchDir - type: Directory - stitchPattern: - inputBinding: - prefix: --stitchPattern - type: string -outputs: - outDir: - outputBinding: - glob: $(inputs.outDir.basename) - type: Directory -requirements: - DockerRequirement: - dockerPull: polusai/tabular-to-microjson-tool:0.1.2-dev0 - InitialWorkDirRequirement: - listing: - - entry: $(inputs.outDir) - writable: true - InlineJavascriptRequirement: {} diff --git a/visualization/tabular-to-microjson-tool/tests/__init__.py b/visualization/tabular-to-microjson-tool/tests/__init__.py deleted file mode 100644 index c02efbd..0000000 --- a/visualization/tabular-to-microjson-tool/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Test for tabular to microjson package.""" diff --git a/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py b/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py deleted file mode 100644 index 7e8d7d8..0000000 --- a/visualization/tabular-to-microjson-tool/tests/test_microjson_overlay.py +++ /dev/null @@ -1,244 +0,0 @@ -"""Test for tabular to microjson package.""" -import json -import pathlib -import shutil -import string -import tempfile - -import numpy as np -import pandas as pd -import pytest -import vaex -from polus.images.visualization.tabular_to_microjson import microjson_overlay as mo -from polus.images.visualization.tabular_to_microjson.__main__ import app -from typer.testing import CliRunner - -runner = CliRunner() - - -@pytest.fixture() -def output_directory() -> pathlib.Path: - """Generate output directory.""" - return pathlib.Path(tempfile.mkdtemp(dir=pathlib.Path.cwd())) - - -@pytest.fixture() -def input_directory() -> 
pathlib.Path: - """Generate output directory.""" - return pathlib.Path(tempfile.mkdtemp(dir=pathlib.Path.cwd())) - - -def clean_directories() -> None: - """Remove all temporary directories.""" - for d in pathlib.Path(".").cwd().iterdir(): - if d.is_dir() and d.name.startswith("tmp"): - shutil.rmtree(d) - - -@pytest.fixture( - params=[ - (384, 2170, "Polygon", ".csv"), - ], -) -def get_params(request: pytest.FixtureRequest) -> tuple[int, int, str, str]: - """To get the parameter of the fixture.""" - return request.param - - -@pytest.fixture() -def generate_synthetic_data( - input_directory: pathlib.Path, - get_params: tuple[int, int, str, str], -) -> tuple[pathlib.Path, pathlib.Path]: - """Generate tabular data.""" - nrows, cell_width, geometry_type, file_extension = get_params - n = int(nrows / 384) - - rng = np.random.default_rng(42) - - pathlib.Path.mkdir(pathlib.Path(input_directory, "data")) - pathlib.Path.mkdir(pathlib.Path(input_directory, "stvector")) - - flist = [] - for x in range(16): - for y in range(24): - for p in range(n): - fname = ( - f"x{x}".zfill(2) - + f"_y{y}".zfill(2) - + f"_p{p}".zfill(2) - + "_c1.ome.tif" - ) - flist.append(fname) - position = (y * cell_width, x * cell_width) - stvector = ( - f"file: {fname}; corr: 0; position: {position}; grid: {(y, x)};" - ) - stitch_path = pathlib.Path(input_directory, "stvector/data.txt") - with pathlib.Path.open(stitch_path, "a") as file: - file.write(f"{stvector}\n") - file.close() - diction_1 = { - "intensity_image": flist, - "Plate": np.repeat("preZ", nrows).tolist(), - "Well": [ - f"{s}{num}" - for s in string.ascii_letters.upper()[:16] - for num in range(24) - for p in range(n) - ], - "Characteristics [Organism 2]": np.repeat( - "Herpes simplex virus type 1", - nrows, - ).tolist(), - "Characteristics [Cell Line]": np.repeat("A549", nrows).tolist(), - "Compound Name": [rng.choice(["DMSO", "Ganciclovir"]) for i in range(nrows)], - "Control Type": [ - rng.choice(["negative control", "positive 
control"]) for i in range(nrows) - ], - "numberOfNuclei": rng.integers( - low=2500, - high=100000, - size=nrows, - ), - "maxVirusIntensity": rng.integers( - low=500, - high=30000, - size=nrows, - ), - } - - df = pd.DataFrame(diction_1) - if file_extension == ".csv": - outpath = pathlib.Path(input_directory, "data/data.csv") - df.to_csv(outpath, index=False) - if file_extension == ".feather": - outpath = pathlib.Path(input_directory, "data/data.feather") - df.to_feather(outpath) - if file_extension == ".arrow": - outpath = pathlib.Path(input_directory, "data/data.arrow") - df.to_feather(outpath) - - return outpath, stitch_path - - -def test_convert_vaex_dataframe( - generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], -) -> None: - """Converting tabular data to vaex dataframe.""" - outpath, _ = generate_synthetic_data - vaex_df = mo.convert_vaex_dataframe(outpath) - assert type(vaex_df) == vaex.dataframe.DataFrameLocal - assert len(list(vaex_df.columns)) != 0 - assert vaex_df.shape[0] > 0 - clean_directories() - - -def test_generate_polygon_coordinates( - generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], -) -> None: - """Test generating polygon coordinates using stitching vector.""" - _, stitch_dir = generate_synthetic_data - stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" - group_by = None - - model = mo.PolygonSpec( - stitch_path=str(stitch_dir), - stitch_pattern=stitch_pattern, - group_by=group_by, - ) - poly = model.get_coordinates - assert all(len(i) for p in poly[0]["coordinates"] for i in p) is True - clean_directories() - - -def test_generate_rectangular_polygon_centroids( - generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], -) -> None: - """Test generating centroid rectangular coordinates using stitching vector.""" - _, stitch_dir = generate_synthetic_data - stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" - group_by = None - model = mo.PointSpec( - stitch_path=str(stitch_dir), - stitch_pattern=stitch_pattern, - 
group_by=group_by, - ) - poly = model.get_coordinates - expected_len = 2 - assert len(poly[0]["coordinates"]) == expected_len - clean_directories() - - -def test_render_overlay_model( - generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], - output_directory: pathlib.Path, - get_params: tuple[int, int, str, str], -) -> None: - """Test render overlay model.""" - inp_dir, stitch_dir = generate_synthetic_data - stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" - _, _, geometry_type, _ = get_params - group_by = None - - if geometry_type == "Polygon": - model = mo.PolygonSpec( - stitch_path=str(stitch_dir), - stitch_pattern=stitch_pattern, - group_by=group_by, - ) - - if geometry_type == "Point": - model = mo.PointSpec( - stitch_path=str(stitch_dir), - stitch_pattern=stitch_pattern, - group_by=group_by, - ) - poly = model.get_coordinates - - microjson = mo.RenderOverlayModel( - file_path=inp_dir, - coordinates=poly, - geometry_type=geometry_type, - out_dir=output_directory, - ) - mjson = microjson.microjson_overlay - out_file = pathlib.Path(output_directory, "data_overlay.json") - with pathlib.Path.open(out_file) as jfile: - mjson = json.load(jfile) - assert len(mjson) != 0 - clean_directories() - - -def test_cli( - generate_synthetic_data: tuple[pathlib.Path, pathlib.Path], - output_directory: pathlib.Path, - get_params: tuple[int, int, str, str], -) -> None: - """Test Cli.""" - inp_dir, stitch_dir = generate_synthetic_data - - stitch_pattern = "x{x:dd}_y{y:dd}_p{p:d}_c{c:d}.ome.tif" - _, _, geometry_type, _ = get_params - - result = runner.invoke( - app, - [ - "--inpDir", - inp_dir.parent, - "--stitchDir", - stitch_dir.parent, - "--filePattern", - ".+", - "--stitchPattern", - stitch_pattern, - "--groupBy", - None, - "--geometryType", - geometry_type, - "--outDir", - pathlib.Path(output_directory), - ], - ) - assert result.exit_code == 0 - clean_directories() From b698150e47b13f1cb092165d2fa08b782eee29de Mon Sep 17 00:00:00 2001 From: hamshkhawar Date: 
Tue, 9 Apr 2024 11:25:20 -0500 Subject: [PATCH 3/6] deleted source code for polus-plugins --- Jenkinsfile | 75 -- src/polus/tabular/__init__.py | 65 -- src/polus/tabular/_plugins/VERSION | 1 - src/polus/tabular/_plugins/__init__.py | 0 src/polus/tabular/_plugins/_compat.py | 4 - .../tabular/_plugins/classes/__init__.py | 27 - .../tabular/_plugins/classes/plugin_base.py | 311 -------- .../_plugins/classes/plugin_classes.py | 472 ------------ src/polus/tabular/_plugins/cwl/__init__.py | 3 - src/polus/tabular/_plugins/cwl/base.cwl | 17 - src/polus/tabular/_plugins/cwl/cwl.py | 7 - src/polus/tabular/_plugins/gh.py | 65 -- src/polus/tabular/_plugins/io/__init__.py | 21 - src/polus/tabular/_plugins/io/_io.py | 597 -------------- .../tabular/_plugins/manifests/__init__.py | 15 - .../_plugins/manifests/manifest_utils.py | 210 ----- .../_plugins/models/PolusComputeSchema.json | 499 ------------ .../_plugins/models/PolusComputeSchema.ts | 102 --- src/polus/tabular/_plugins/models/__init__.py | 35 - .../models/pydanticv1/PolusComputeSchema.py | 137 ---- .../models/pydanticv1/WIPPPluginSchema.py | 233 ------ .../_plugins/models/pydanticv1/__init__.py | 0 .../_plugins/models/pydanticv1/compute.py | 28 - .../_plugins/models/pydanticv1/wipp.py | 79 -- .../models/pydanticv2/PolusComputeSchema.py | 136 ---- .../models/pydanticv2/WIPPPluginSchema.py | 241 ------ .../_plugins/models/pydanticv2/__init__.py | 0 .../_plugins/models/pydanticv2/compute.py | 28 - .../_plugins/models/pydanticv2/wipp.py | 79 -- .../models/wipp-plugin-manifest-schema.json | 726 ------------------ src/polus/tabular/_plugins/registry.py | 280 ------- src/polus/tabular/_plugins/registry_utils.py | 135 ---- src/polus/tabular/_plugins/update/__init__.py | 6 - src/polus/tabular/_plugins/update/_update.py | 116 --- src/polus/tabular/_plugins/utils.py | 17 - tests/__init__.py | 1 - tests/resources/b1.json | 77 -- tests/resources/b2.json | 76 -- tests/resources/b3.json | 76 -- tests/resources/g1.json | 78 -- 
tests/resources/g2.json | 77 -- tests/resources/g3.json | 77 -- tests/resources/omeconverter022.json | 45 -- tests/resources/tabularconverter.json | 75 -- tests/resources/target1.cwl | 32 - tests/test_cwl.py | 105 --- tests/test_io.py | 69 -- tests/test_manifests.py | 236 ------ tests/test_plugins.py | 198 ----- tests/test_version.py | 171 ----- utils/polus-python-template/.bumpversion.cfg | 23 - utils/polus-python-template/.gitignore | 1 - utils/polus-python-template/CHANGELOG.md | 14 - utils/polus-python-template/README.md | 112 --- utils/polus-python-template/VERSION | 1 - .../hooks/post_gen_project.py | 63 -- .../hooks/pre_gen_project.py | 55 -- utils/polus-python-template/pyproject.toml | 32 - .../.bumpversion.cfg | 29 - .../.dockerignore | 4 - .../.gitignore | 1 - .../CHANGELOG.md | 5 - .../Dockerfile | 26 - .../{{cookiecutter.container_name}}/README.md | 23 - .../{{cookiecutter.container_name}}/VERSION | 1 - .../build-docker.sh | 4 - .../plugin.json | 67 -- .../pyproject.toml | 32 - .../run-plugin.sh | 20 - .../__init__.py | 7 - .../__main__.py | 87 --- .../{{ cookiecutter.package_name }}.py | 16 - .../tests/__init__.py | 1 - .../tests/conftest.py | 147 ---- .../tests/test_cli.py | 96 --- .../test_{{cookiecutter.package_name}}.py | 22 - 76 files changed, 7049 deletions(-) delete mode 100644 Jenkinsfile delete mode 100644 src/polus/tabular/__init__.py delete mode 100644 src/polus/tabular/_plugins/VERSION delete mode 100644 src/polus/tabular/_plugins/__init__.py delete mode 100644 src/polus/tabular/_plugins/_compat.py delete mode 100644 src/polus/tabular/_plugins/classes/__init__.py delete mode 100644 src/polus/tabular/_plugins/classes/plugin_base.py delete mode 100644 src/polus/tabular/_plugins/classes/plugin_classes.py delete mode 100644 src/polus/tabular/_plugins/cwl/__init__.py delete mode 100644 src/polus/tabular/_plugins/cwl/base.cwl delete mode 100644 src/polus/tabular/_plugins/cwl/cwl.py delete mode 100644 src/polus/tabular/_plugins/gh.py delete mode 
100644 src/polus/tabular/_plugins/io/__init__.py delete mode 100644 src/polus/tabular/_plugins/io/_io.py delete mode 100644 src/polus/tabular/_plugins/manifests/__init__.py delete mode 100644 src/polus/tabular/_plugins/manifests/manifest_utils.py delete mode 100644 src/polus/tabular/_plugins/models/PolusComputeSchema.json delete mode 100644 src/polus/tabular/_plugins/models/PolusComputeSchema.ts delete mode 100644 src/polus/tabular/_plugins/models/__init__.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv1/__init__.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv1/compute.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv1/wipp.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv2/__init__.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv2/compute.py delete mode 100644 src/polus/tabular/_plugins/models/pydanticv2/wipp.py delete mode 100644 src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json delete mode 100644 src/polus/tabular/_plugins/registry.py delete mode 100644 src/polus/tabular/_plugins/registry_utils.py delete mode 100644 src/polus/tabular/_plugins/update/__init__.py delete mode 100644 src/polus/tabular/_plugins/update/_update.py delete mode 100644 src/polus/tabular/_plugins/utils.py delete mode 100644 tests/__init__.py delete mode 100644 tests/resources/b1.json delete mode 100644 tests/resources/b2.json delete mode 100644 tests/resources/b3.json delete mode 100644 tests/resources/g1.json delete mode 100644 tests/resources/g2.json delete mode 100644 tests/resources/g3.json delete mode 100644 tests/resources/omeconverter022.json delete 
mode 100644 tests/resources/tabularconverter.json delete mode 100644 tests/resources/target1.cwl delete mode 100644 tests/test_cwl.py delete mode 100644 tests/test_io.py delete mode 100644 tests/test_manifests.py delete mode 100644 tests/test_plugins.py delete mode 100644 tests/test_version.py delete mode 100644 utils/polus-python-template/.bumpversion.cfg delete mode 100644 utils/polus-python-template/.gitignore delete mode 100644 utils/polus-python-template/CHANGELOG.md delete mode 100644 utils/polus-python-template/README.md delete mode 100644 utils/polus-python-template/VERSION delete mode 100644 utils/polus-python-template/hooks/post_gen_project.py delete mode 100644 utils/polus-python-template/hooks/pre_gen_project.py delete mode 100644 utils/polus-python-template/pyproject.toml delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/README.md delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/VERSION delete mode 100755 utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/plugin.json delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/pyproject.toml delete mode 100755 utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__init__.py delete mode 100644 
utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/__main__.py delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py delete mode 100644 utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py diff --git a/Jenkinsfile b/Jenkinsfile deleted file mode 100644 index 0fa72d1..0000000 --- a/Jenkinsfile +++ /dev/null @@ -1,75 +0,0 @@ -pipeline { - agent { - node { label 'linux && build && aws' } - } - environment { - PROJECT_URL = 'https://github.com/polusai/polus-plugins' - } - triggers { - pollSCM('H/5 * * * *') - } - stages { - stage('Build Version') { - steps{ - script { - BUILD_VERSION_GENERATED = VersionNumber( - versionNumberString: 'v${BUILD_YEAR, XX}.${BUILD_MONTH, XX}${BUILD_DAY, XX}.${BUILDS_TODAY}', - projectStartDate: '1970-01-01', - skipFailedBuilds: false) - currentBuild.displayName = BUILD_VERSION_GENERATED - env.BUILD_VERSION = BUILD_VERSION_GENERATED - } - } - } - stage('Checkout source code') { - steps { - cleanWs() - checkout scm - } - } - stage('Build Docker images') { - steps { - script { - configFileProvider([configFile(fileId: 'update-docker-description', targetLocation: 'update.sh')]) { - // List all directories, each directory contains a plugin - def pluginDirectories = """${sh ( - script: "ls -d */", - returnStdout: true - )}""" - // Iterate over each plugin directory - pluginDirectories.split().each { repo -> - // Truncate hanging "/" for each directory - def pluginName = repo.getAt(0..(repo.length() - 2)) - // Check if VERSION file for each plugin file has changed - def 
isChanged = "0" - - if (env.GIT_PREVIOUS_SUCCESSFUL_COMMIT) { - isChanged = """${sh ( - script: "git diff --name-only ${GIT_PREVIOUS_SUCCESSFUL_COMMIT} ${GIT_COMMIT} | grep ${pluginName}/VERSION", - returnStatus: true - )}""" - } - if (isChanged == "0" && pluginName != "utils") { - dir("${WORKSPACE}/${pluginName}") { - def dockerVersion = readFile(file: 'VERSION').trim() - docker.withRegistry('https://registry-1.docker.io/v2/', 'f16c74f9-0a60-4882-b6fd-bec3b0136b84') { - def image = docker.build("labshare/${pluginName}", '--no-cache ./') - image.push() - image.push(dockerVersion) - } - - env.PROJECT_NAME = "${pluginName}" - env.FULL_DESC = readFile(file: 'README.md') - env.BRIEF_DESC = "${PROJECT_URL}/tree/master/${PROJECT_NAME}" - } - withCredentials([usernamePassword(credentialsId: 'f16c74f9-0a60-4882-b6fd-bec3b0136b84', usernameVariable: 'DOCKER_USER', passwordVariable: 'DOCKER_PW')]) { - sh "sh ./update.sh" - } - } - } - } - } - } - } - } -} \ No newline at end of file diff --git a/src/polus/tabular/__init__.py b/src/polus/tabular/__init__.py deleted file mode 100644 index c5b74cc..0000000 --- a/src/polus/tabular/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -"""Initialize polus-plugins module.""" - -import logging -from pathlib import Path -from typing import Union - -from polus.tabular._plugins.classes import ( - ComputePlugin, # pylint: disable=unused-import -) -from polus.tabular._plugins.classes import Plugin # pylint: disable=unused-import -from polus.tabular._plugins.classes import get_plugin # pylint: disable=unused-import -from polus.tabular._plugins.classes import list_plugins # pylint: disable=unused-import -from polus.tabular._plugins.classes import load_config # pylint: disable=unused-import -from polus.tabular._plugins.classes import refresh # pylint: disable=unused-import -from polus.tabular._plugins.classes import remove_all # pylint: disable=unused-import -from polus.tabular._plugins.classes import ( # pylint: disable=unused-import - 
remove_plugin, -) -from polus.tabular._plugins.classes import ( # pylint: disable=unused-import - submit_plugin, -) -from polus.tabular._plugins.update import ( # pylint: disable=unused-import - update_nist_plugins, -) -from polus.tabular._plugins.update import ( # pylint: disable=unused-import - update_polus_plugins, -) - -""" -Set up logging for the module -""" -logger = logging.getLogger("polus.tabular") - -with Path(__file__).parent.joinpath("_plugins/VERSION").open( - "r", - encoding="utf-8", -) as version_file: - VERSION = version_file.read().strip() - - -refresh() # calls the refresh method when library is imported - - -def __getattr__(name: str) -> Union[Plugin, ComputePlugin, list]: - if name == "list": - return list_plugins() - if name in list_plugins(): - return get_plugin(name) - if name in ["__version__", "VERSION"]: - return VERSION - msg = f"module '{__name__}' has no attribute '{name}'" - raise AttributeError(msg) - - -__all__ = [ - "refresh", - "submit_plugin", - "get_plugin", - "load_config", - "list_plugins", - "update_polus_plugins", - "update_nist_plugins", - "remove_all", - "remove_plugin", -] diff --git a/src/polus/tabular/_plugins/VERSION b/src/polus/tabular/_plugins/VERSION deleted file mode 100644 index 17e51c3..0000000 --- a/src/polus/tabular/_plugins/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.1 diff --git a/src/polus/tabular/_plugins/__init__.py b/src/polus/tabular/_plugins/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/polus/tabular/_plugins/_compat.py b/src/polus/tabular/_plugins/_compat.py deleted file mode 100644 index 190aa0f..0000000 --- a/src/polus/tabular/_plugins/_compat.py +++ /dev/null @@ -1,4 +0,0 @@ -"""Compat of Pydantic.""" -import pydantic - -PYDANTIC_V2 = pydantic.VERSION.startswith("2.") diff --git a/src/polus/tabular/_plugins/classes/__init__.py b/src/polus/tabular/_plugins/classes/__init__.py deleted file mode 100644 index c6be7d4..0000000 --- 
a/src/polus/tabular/_plugins/classes/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -"""Plugin classes and functions.""" - -from polus.tabular._plugins.classes.plugin_classes import PLUGINS -from polus.tabular._plugins.classes.plugin_classes import ComputePlugin -from polus.tabular._plugins.classes.plugin_classes import Plugin -from polus.tabular._plugins.classes.plugin_classes import _load_plugin -from polus.tabular._plugins.classes.plugin_classes import get_plugin -from polus.tabular._plugins.classes.plugin_classes import list_plugins -from polus.tabular._plugins.classes.plugin_classes import load_config -from polus.tabular._plugins.classes.plugin_classes import refresh -from polus.tabular._plugins.classes.plugin_classes import remove_all -from polus.tabular._plugins.classes.plugin_classes import remove_plugin -from polus.tabular._plugins.classes.plugin_classes import submit_plugin - -__all__ = [ - "Plugin", - "ComputePlugin", - "submit_plugin", - "get_plugin", - "refresh", - "list_plugins", - "remove_plugin", - "remove_all", - "load_config", - "_load_plugin", - "PLUGINS", -] diff --git a/src/polus/tabular/_plugins/classes/plugin_base.py b/src/polus/tabular/_plugins/classes/plugin_base.py deleted file mode 100644 index af22db4..0000000 --- a/src/polus/tabular/_plugins/classes/plugin_base.py +++ /dev/null @@ -1,311 +0,0 @@ -"""Methods for all plugin objects.""" -# pylint: disable=W1203, W0212, enable=W1201 -import enum -import json -import logging -import random -import signal -from pathlib import Path -from typing import Any -from typing import Optional -from typing import TypeVar -from typing import Union - -import fsspec -import yaml # type: ignore -from cwltool.context import RuntimeContext -from cwltool.factory import Factory -from cwltool.utils import CWLObjectType -from polus.tabular._plugins.cwl import CWL_BASE_DICT -from polus.tabular._plugins.io import input_to_cwl -from polus.tabular._plugins.io import io_to_yml -from polus.tabular._plugins.io import 
output_to_cwl -from polus.tabular._plugins.io import outputs_cwl -from polus.tabular._plugins.utils import name_cleaner -from python_on_whales import docker - -logger = logging.getLogger("polus.tabular") - -StrPath = TypeVar("StrPath", str, Path) - - -class IOKeyError(Exception): - """Raised when trying to set invalid I/O parameter.""" - - -class MissingInputValuesError(Exception): - """Raised when there are required input values that have not been set.""" - - -class BasePlugin: - """Base Class for Plugins.""" - - def _check_inputs(self) -> None: - """Check if all required inputs have been set.""" - _in = [x for x in self.inputs if x.required and not x.value] # type: ignore - if len(_in) > 0: - msg = f"{[x.name for x in _in]} are required inputs but have not been set" - raise MissingInputValuesError( - msg, # type: ignore - ) - - @property - def organization(self) -> str: - """Plugin container's organization.""" - return self.containerId.split("/")[0] - - def load_config(self, path: StrPath) -> None: - """Load configured plugin from file.""" - with Path(path).open(encoding="utf=8") as fw: - config = json.load(fw) - inp = config["inputs"] - out = config["outputs"] - for k, v in inp.items(): - if k in self._io_keys: - setattr(self, k, v) - for k, v in out.items(): - if k in self._io_keys: - setattr(self, k, v) - logger.debug(f"Loaded config from {path}") - - def run( - self, - gpus: Union[None, str, int] = "all", - **kwargs: Union[None, str, int], - ) -> None: - """Run plugin in Docker container.""" - self._check_inputs() - inp_dirs = [x for x in self.inputs if isinstance(x.value, Path)] - out_dirs = [x for x in self.outputs if isinstance(x.value, Path)] - - inp_dirs_dict = {x: f"/data/inputs/input{n}" for (n, x) in enumerate(inp_dirs)} - out_dirs_dict = { - x: f"/data/outputs/output{n}" for (n, x) in enumerate(out_dirs) - } - - mnts_in = [ - [f"type=bind,source={k},target={v},readonly"] # must be a list of lists - for (k, v) in inp_dirs_dict.items() - ] - mnts_out = 
[ - [f"type=bind,source={k},target={v}"] # must be a list of lists - for (k, v) in out_dirs_dict.items() - ] - - mnts = mnts_in + mnts_out - args = [] - - for i in self.inputs: - if i.value is not None: # do not include those with value=None - i._validate() - args.append(f"--{i.name}") - - if isinstance(i.value, Path): - args.append(inp_dirs_dict[str(i.value)]) - - elif isinstance(i.value, enum.Enum): - args.append(str(i.value._name_)) - - else: - args.append(str(i.value)) - - for o in self.outputs: - if o.value is not None: # do not include those with value=None - o._validate() - args.append(f"--{o.name}") - - if isinstance(o.value, Path): - args.append(out_dirs_dict[str(o.value)]) - - elif isinstance(o.value, enum.Enum): - args.append(str(o.value._name_)) - - else: - args.append(str(o.value)) - - random_int = random.randint(10, 99) # noqa: S311 # only for naming - container_name = f"polus{random_int}" - - def sig( - signal, # noqa # pylint: disable=W0613, W0621 - frame, # noqa # pylint: disable=W0613, W0621 - ) -> None: # signal handler to kill container when KeyboardInterrupt - logger.info(f"Exiting container {container_name}") - docker.kill(container_name) - - signal.signal( - signal.SIGINT, - sig, - ) # make of sig the handler for KeyboardInterrupt - if gpus is None: - logger.info( - f"""Running container without GPU. {self.__class__.__name__} - version {self.version!s}""", - ) - docker_ = docker.run( - self.containerId, - args, - name=container_name, - remove=True, - mounts=mnts, - **kwargs, - ) - print(docker_) # noqa - else: - logger.info( - f"""Running container with GPU: --gpus {gpus}. 
- {self.__class__.__name__} version {self.version!s}""", - ) - docker_ = docker.run( - self.containerId, - args, - gpus=gpus, - name=container_name, - remove=True, - mounts=mnts, - **kwargs, - ) - print(docker_) # noqa - - @property - def manifest(self) -> dict: - """Plugin manifest.""" - manifest_ = json.loads(self.json(exclude={"_io_keys", "versions", "id"})) - manifest_["version"] = manifest_["version"]["version"] - return manifest_ - - def __getattribute__(self, name: str) -> Any: # noqa - if name == "__class__": # pydantic v2 change - return super().__getattribute__(name) - if name != "_io_keys" and hasattr(self, "_io_keys") and name in self._io_keys: - value = self._io_keys[name].value - if isinstance(value, enum.Enum): - value = value.name - return value - - return super().__getattribute__(name) - - def __setattr__(self, name: str, value: Any) -> None: # noqa - if name == "_fs": - if not issubclass(type(value), fsspec.spec.AbstractFileSystem): - msg = "_fs must be an fsspec FileSystem" - raise ValueError(msg) - for i in self.inputs: - i._fs = value - for o in self.outputs: - o._fs = value - return - - if name != "_io_keys" and hasattr(self, "_io_keys"): - if name in self._io_keys: - logger.debug( - f"Value of {name} in {self.__class__.__name__} set to {value}", - ) - self._io_keys[name].value = value - return - msg = ( - f"Attempting to set {name} in " - "{self.__class__.__name__} but " - "{{name}} is not a valid I/O parameter" - ) - raise IOKeyError( - msg, - ) - - super().__setattr__(name, value) - - def _to_cwl(self) -> dict: - """Return CWL yml as dict.""" - cwl_dict = CWL_BASE_DICT - cwl_dict["inputs"] = {} - cwl_dict["outputs"] = {} - inputs = [input_to_cwl(x) for x in self.inputs] - inputs = inputs + [output_to_cwl(x) for x in self.outputs] - for inp in inputs: - cwl_dict["inputs"].update(inp) - outputs = [outputs_cwl(x) for x in self.outputs] - for out in outputs: - cwl_dict["outputs"].update(out) - 
cwl_dict["requirements"]["DockerRequirement"]["dockerPull"] = self.containerId - return cwl_dict - - def save_cwl(self, path: StrPath) -> Path: - """Save plugin as CWL command line tool.""" - if str(path).rsplit(".", maxsplit=1)[-1] != "cwl": - msg = "path must end in .cwl" - raise ValueError(msg) - with Path(path).open("w", encoding="utf-8") as file: - yaml.dump(self._to_cwl(), file) - return Path(path) - - @property - def _cwl_io(self) -> dict: - """Dict of I/O for CWL.""" - return { - x.name: io_to_yml(x) for x in self._io_keys.values() if x.value is not None - } - - def save_cwl_io(self, path: StrPath) -> Path: - """Save plugin's I/O values to yml file. - - To be used with CWL Command Line Tool. - """ - self._check_inputs() - if str(path).rsplit(".", maxsplit=1)[-1] != "yml": - msg = "path must end in .yml" - raise ValueError(msg) - with Path(path).open("w", encoding="utf-8") as file: - yaml.dump(self._cwl_io, file) - return Path(path) - - def run_cwl( - self, - cwl_path: Optional[StrPath] = None, - io_path: Optional[StrPath] = None, - ) -> Union[CWLObjectType, str, None]: - """Run configured plugin in CWL. - - Run plugin as a CWL command line tool after setting I/O values. - Two files will be generated: a CWL (`.cwl`) command line tool - and an I/O file (`.yml`). They will be generated in - current working directory if no paths are specified. Optional paths - for these files can be specified with arguments `cwl_path`, - and `io_path` respectively. 
- - Args: - cwl_path: [Optional] target path for `.cwl` file - io_path: [Optional] target path for `.yml` file - - """ - if not self.outDir: - msg = "" - raise ValueError(msg) - - if not cwl_path: - _p = Path.cwd().joinpath(name_cleaner(self.name) + ".cwl") - _cwl = self.save_cwl(_p) - else: - _cwl = self.save_cwl(cwl_path) - - if not io_path: - _p = Path.cwd().joinpath(name_cleaner(self.name) + ".yml") - self.save_cwl_io(_p) # saves io to make it visible to user - else: - self.save_cwl_io(io_path) # saves io to make it visible to user - - outdir_path = self.outDir.parent.relative_to(Path.cwd()) - r_c = RuntimeContext({"outdir": str(outdir_path)}) - fac = Factory(runtime_context=r_c) - cwl = fac.make(str(_cwl)) - return cwl(**self._cwl_io) # object's io dict is used instead of .yml file - - def __lt__(self, other: "BasePlugin") -> bool: - return self.version < other.version - - def __gt__(self, other: "BasePlugin") -> bool: - return other.version < self.version - - def __repr__(self) -> str: - return ( - f"{self.__class__.__name__}(name='{self.name}', version={self.version!s})" - ) diff --git a/src/polus/tabular/_plugins/classes/plugin_classes.py b/src/polus/tabular/_plugins/classes/plugin_classes.py deleted file mode 100644 index 68e3e9b..0000000 --- a/src/polus/tabular/_plugins/classes/plugin_classes.py +++ /dev/null @@ -1,472 +0,0 @@ -"""Classes for Plugin objects containing methods to configure, run, and save.""" -# pylint: disable=W1203, W0212, enable=W1201 -import json -import logging -import shutil -import uuid -from copy import deepcopy -from pathlib import Path -from typing import Any -from typing import Optional -from typing import Union - -from polus.tabular._plugins._compat import PYDANTIC_V2 -from polus.tabular._plugins.classes.plugin_base import BasePlugin -from polus.tabular._plugins.io._io import DuplicateVersionFoundError -from polus.tabular._plugins.io._io import Version -from polus.tabular._plugins.io._io import _in_old_to_new -from 
polus.tabular._plugins.io._io import _ui_old_to_new -from polus.tabular._plugins.manifests import InvalidManifestError -from polus.tabular._plugins.manifests import _load_manifest -from polus.tabular._plugins.manifests import validate_manifest -from polus.tabular._plugins.models import ComputeSchema -from polus.tabular._plugins.models import PluginUIInput -from polus.tabular._plugins.models import PluginUIOutput -from polus.tabular._plugins.models import WIPPPluginManifest -from polus.tabular._plugins.utils import cast_version -from polus.tabular._plugins.utils import name_cleaner -from pydantic import ConfigDict - -logger = logging.getLogger("polus.tabular") -PLUGINS: dict[str, dict] = {} -# PLUGINS = {"BasicFlatfieldCorrectionPlugin": -# {Version('0.1.4'): Path(<...>), Version('0.1.5'): Path(<...>)}. -# "VectorToLabel": {Version(...)}} - -""" -Paths and Fields -""" -# Location to store any discovered plugin manifests -_PLUGIN_DIR = Path(__file__).parent.parent.joinpath("manifests") - - -def refresh() -> None: - """Refresh the plugin list.""" - organizations = [ - x for x in _PLUGIN_DIR.iterdir() if x.name != "__pycache__" and x.is_dir() - ] # ignore __pycache__ - - PLUGINS.clear() - - for org in organizations: - for file in org.iterdir(): - if file.suffix == ".py": - continue - - try: - plugin = validate_manifest(file) - except InvalidManifestError: - logger.warning(f"Validation error in {file!s}") - except BaseException as exc: # pylint: disable=W0718 - logger.warning(f"Unexpected error {exc} with {file!s}") - raise exc - - else: - key = name_cleaner(plugin.name) - # Add version and path to VERSIONS - if key not in PLUGINS: - PLUGINS[key] = {} - if ( - plugin.version in PLUGINS[key] - and file != PLUGINS[key][plugin.version] - ): - msg = ( - "Found duplicate version of plugin" - f"{plugin.name} in {_PLUGIN_DIR}" - ) - raise DuplicateVersionFoundError( - msg, - ) - PLUGINS[key][plugin.version] = file - - -def list_plugins() -> list: - """List all local 
plugins.""" - output = list(PLUGINS.keys()) - output.sort() - return output - - -def _get_config(plugin: Union["Plugin", "ComputePlugin"], class_: str) -> dict: - if PYDANTIC_V2: - model_ = json.loads(plugin.model_dump_json()) - model_["_io_keys"] = deepcopy(plugin._io_keys) # type: ignore - else: - # ignore mypy if pydantic < 2.0.0 - model_ = plugin.dict() # type: ignore - # iterate over I/O to convert to dict - for io_name, io in model_["_io_keys"].items(): - if PYDANTIC_V2: - model_["_io_keys"][io_name] = json.loads(io.model_dump_json()) - # overwrite val if enum - if io.type.value == "enum": - model_["_io_keys"][io_name]["value"] = io.value.name # str - elif io["type"] == "enum": # pydantic V1 - val_ = io["value"].name # mapDirectory.raw - model_["_io_keys"][io_name]["value"] = val_.split(".")[-1] # raw - for inp in model_["inputs"]: - inp["value"] = None - model_["class"] = class_ - return model_ - - -class Plugin(WIPPPluginManifest, BasePlugin): - """WIPP Plugin Class. - - Contains methods to configure, run, and save plugins. - - Attributes: - versions: A list of local available versions for this plugin. 
- - Methods: - save_manifest(path): save plugin manifest to specified path - """ - - id: uuid.UUID # noqa: A003 - if PYDANTIC_V2: - model_config = ConfigDict(extra="allow", frozen=True) - else: - - class Config: # pylint: disable=R0903 - """Config.""" - - extra = "allow" - allow_mutation = False - - def __init__(self, _uuid: bool = True, **data: dict) -> None: - """Init a plugin object from manifest.""" - if _uuid: - data["id"] = uuid.uuid4() # type: ignore - else: - data["id"] = uuid.UUID(str(data["id"])) # type: ignore - - if not PYDANTIC_V2: # pydantic V1 - data["version"] = cast_version(data["version"]) - - super().__init__(**data) - - if not PYDANTIC_V2: # pydantic V1 - self.Config.allow_mutation = True - - self._io_keys = {i.name: i for i in self.inputs} - self._io_keys.update({o.name: o for o in self.outputs}) - - if not self.author: - warn_msg = ( - f"The plugin ({self.name}) is missing the author field. " - "This field is not required but should be filled in." - ) - logger.warning(warn_msg) - - @property - def versions(self) -> list: # cannot be in PluginMethods because PLUGINS lives here - """Return list of local versions of a Plugin.""" - return list(PLUGINS[name_cleaner(self.name)]) - - def to_compute( - self, - hardware_requirements: Optional[dict] = None, - ) -> type[ComputeSchema]: - """Convert WIPP Plugin object to Compute Plugin object.""" - data = deepcopy(self.manifest) - return ComputePlugin( - hardware_requirements=hardware_requirements, - _from_old=True, - **data, - ) - - def save_manifest( - self, - path: Union[str, Path], - hardware_requirements: Optional[dict] = None, - compute: bool = False, - ) -> None: - """Save plugin manifest to specified path.""" - if compute: - with Path(path).open("w", encoding="utf-8") as file: - self.to_compute( - hardware_requirements=hardware_requirements, - ).save_manifest(path) - else: - with Path(path).open("w", encoding="utf-8") as file: - dict_ = self.manifest - json.dump( - dict_, - file, - indent=4, - ) - 
- logger.debug(f"Saved manifest to {path}") - - def __setattr__(self, name: str, value: Any) -> None: # noqa: ANN401 - """Set I/O parameters as attributes.""" - BasePlugin.__setattr__(self, name, value) - - def save_config(self, path: Union[str, Path]) -> None: - """Save manifest with configured I/O parameters to specified path.""" - with Path(path).open("w", encoding="utf-8") as file: - json.dump(_get_config(self, "WIPP"), file, indent=4, default=str) - logger.debug(f"Saved config to {path}") - - def __repr__(self) -> str: - """Print plugin name and version.""" - return BasePlugin.__repr__(self) - - -class ComputePlugin(ComputeSchema, BasePlugin): - """Compute Plugin Class. - - Contains methods to configure, run, and save plugins. - - Attributes: - versions: A list of local available versions for this plugin. - - Methods: - save_manifest(path): save plugin manifest to specified path - """ - - if PYDANTIC_V2: - model_config = ConfigDict(extra="allow", frozen=True) - else: # pydantic V1 - - class Config: # pylint: disable=R0903 - """Config.""" - - extra = "allow" - allow_mutation = False - - def __init__( - self, - hardware_requirements: Optional[dict] = None, - _from_old: bool = False, - _uuid: bool = True, - **data: dict, - ) -> None: - """Init a plugin object from manifest.""" - if _uuid: - data["id"] = uuid.uuid4() # type: ignore - else: - data["id"] = uuid.UUID(str(data["id"])) # type: ignore - - if _from_old: - - def _convert_input(dict_: dict) -> dict: - dict_["type"] = _in_old_to_new(dict_["type"]) - return dict_ - - def _convert_output(dict_: dict) -> dict: - dict_["type"] = "path" - return dict_ - - def _ui_in(dict_: dict) -> PluginUIInput: # assuming old all ui input - # assuming format inputs. 
___ - inp = dict_["key"].split(".")[-1] # e.g inpDir - try: - type_ = [x["type"] for x in data["inputs"] if x["name"] == inp][ - 0 - ] # get type from i/o - except IndexError: - type_ = "string" # default to string - except BaseException as exc: - raise exc - - dict_["type"] = _ui_old_to_new(type_) - return PluginUIInput(**dict_) - - def _ui_out(dict_: dict) -> PluginUIOutput: - new_dict_ = deepcopy(dict_) - new_dict_["name"] = "outputs." + new_dict_["name"] - new_dict_["type"] = _ui_old_to_new(new_dict_["type"]) - return PluginUIOutput(**new_dict_) - - data["inputs"] = [_convert_input(x) for x in data["inputs"]] # type: ignore - data["outputs"] = [ - _convert_output(x) for x in data["outputs"] - ] # type: ignore - data["pluginHardwareRequirements"] = {} - data["ui"] = [_ui_in(x) for x in data["ui"]] # type: ignore - data["ui"].extend( # type: ignore[attr-defined] - [_ui_out(x) for x in data["outputs"]], - ) - - if hardware_requirements: - for k, v in hardware_requirements.items(): - data["pluginHardwareRequirements"][k] = v - - data["version"] = cast_version(data["version"]) - super().__init__(**data) - self.Config.allow_mutation = True - self._io_keys = {i.name: i for i in self.inputs} - self._io_keys.update({o.name: o for o in self.outputs}) # type: ignore - - if not self.author: - warn_msg = ( - f"The plugin ({self.name}) is missing the author field. " - "This field is not required but should be filled in." 
- ) - logger.warning(warn_msg) - - @property - def versions(self) -> list: # cannot be in PluginMethods because PLUGINS lives here - """Return list of local versions of a Plugin.""" - return list(PLUGINS[name_cleaner(self.name)]) - - def __setattr__(self, name: str, value: Any) -> None: # noqa: ANN401 - """Set I/O parameters as attributes.""" - BasePlugin.__setattr__(self, name, value) - - def save_config(self, path: Union[str, Path]) -> None: - """Save configured manifest with I/O parameters to specified path.""" - with Path(path).open("w", encoding="utf-8") as file: - json.dump(_get_config(self, "Compute"), file, indent=4, default=str) - logger.debug(f"Saved config to {path}") - - def save_manifest(self, path: Union[str, Path]) -> None: - """Save plugin manifest to specified path.""" - with Path(path).open("w", encoding="utf-8") as file: - json.dump(self.manifest, file, indent=4) - logger.debug(f"Saved manifest to {path}") - - def __repr__(self) -> str: - """Print plugin name and version.""" - return BasePlugin.__repr__(self) - - -def _load_plugin( - manifest: Union[str, dict, Path], -) -> Union[Plugin, ComputePlugin]: - """Parse a manifest and return one of Plugin or ComputePlugin.""" - manifest = _load_manifest(manifest) - if "pluginHardwareRequirements" in manifest: # type: ignore[operator] - # Parse the manifest - plugin = ComputePlugin(**manifest) # type: ignore[arg-type] - else: - # Parse the manifest - plugin = Plugin(**manifest) # type: ignore[arg-type] - return plugin - - -def submit_plugin( - manifest: Union[str, dict, Path], -) -> Union[Plugin, ComputePlugin]: - """Parse a plugin and create a local copy of it. - - This function accepts a plugin manifest as a string, a dictionary (parsed - json), or a pathlib.Path object pointed at a plugin manifest. - - Args: - manifest: - A plugin manifest. 
It can be a url, a dictionary, - a path to a JSON file or a string that can be parsed as a dictionary - - Returns: - A Plugin object populated with information from the plugin manifest. - """ - plugin = validate_manifest(manifest) - plugin_name = name_cleaner(plugin.name) - - # Get Major/Minor/Patch versions - out_name = ( - plugin_name - + f"_M{plugin.version.major}m{plugin.version.minor}p{plugin.version.patch}.json" - ) - - # Save the manifest if it doesn't already exist in the database - organization = plugin.containerId.split("/")[0] - org_path = _PLUGIN_DIR.joinpath(organization.lower()) - org_path.mkdir(exist_ok=True, parents=True) - if not org_path.joinpath(out_name).exists(): - with org_path.joinpath(out_name).open("w", encoding="utf-8") as file: - if not PYDANTIC_V2: # pydantic V1 - manifest_ = plugin.dict() # type: ignore - manifest_["version"] = manifest_["version"]["version"] - else: # PYDANTIC V2 - manifest_ = json.loads(plugin.model_dump_json()) - json.dump(manifest_, file, indent=4) - - # Refresh plugins list - refresh() - return plugin - - -def get_plugin( - name: str, - version: Optional[str] = None, -) -> Union[Plugin, ComputePlugin]: - """Get a plugin with option to specify version. - - Return a plugin object with the option to specify a version. - The specified version's manifest must exist in manifests folder. - - Args: - name: Name of the plugin. - version: Optional version of the plugin, must follow semver. 
- - Returns: - Plugin object - """ - if version is None: - return _load_plugin(PLUGINS[name][max(PLUGINS[name])]) - if PYDANTIC_V2: - return _load_plugin(PLUGINS[name][Version(version)]) - return _load_plugin(PLUGINS[name][Version(**{"version": version})]) # Pydantic V1 - - -def load_config(config: Union[dict, Path, str]) -> Union[Plugin, ComputePlugin]: - """Load configured plugin from config file/dict.""" - if isinstance(config, (Path, str)): - with Path(config).open("r", encoding="utf-8") as file: - manifest_ = json.load(file) - elif isinstance(config, dict): - manifest_ = config - else: - msg = "config must be a dict, str, or a path" - raise TypeError(msg) - io_keys_ = manifest_["_io_keys"] - class_ = manifest_["class"] - manifest_.pop("class", None) - if class_ == "Compute": - plugin_ = ComputePlugin(_uuid=False, **manifest_) - elif class_ == "WIPP": - plugin_ = Plugin(_uuid=False, **manifest_) - else: - msg = "Invalid value of class" - raise ValueError(msg) - for key, value_ in io_keys_.items(): - val = value_["value"] - if val is not None: # exclude those values not set - setattr(plugin_, key, val) - return plugin_ - - -def remove_plugin(plugin: str, version: Optional[Union[str, list[str]]] = None) -> None: - """Remove plugin from the local database.""" - if version is None: - for plugin_version in PLUGINS[plugin]: - remove_plugin(plugin, plugin_version) - else: - if isinstance(version, list): - for version_ in version: - remove_plugin(plugin, version_) - return - if not PYDANTIC_V2: # pydantic V1 - if not isinstance(version, Version): - version_ = cast_version(version) - else: - version_ = version - else: # pydanitc V2 - version_ = Version(version) if not isinstance(version, Version) else version - path = PLUGINS[plugin][version_] - path.unlink() - refresh() - - -def remove_all() -> None: - """Remove all plugins from the local database.""" - organizations = [ - x for x in _PLUGIN_DIR.iterdir() if x.name != "__pycache__" and x.is_dir() - ] # ignore 
__pycache__ - logger.warning("Removing all plugins from local database") - for org in organizations: - shutil.rmtree(org) - refresh() diff --git a/src/polus/tabular/_plugins/cwl/__init__.py b/src/polus/tabular/_plugins/cwl/__init__.py deleted file mode 100644 index 966ef2d..0000000 --- a/src/polus/tabular/_plugins/cwl/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .cwl import CWL_BASE_DICT - -__all__ = ["CWL_BASE_DICT"] diff --git a/src/polus/tabular/_plugins/cwl/base.cwl b/src/polus/tabular/_plugins/cwl/base.cwl deleted file mode 100644 index 7a86922..0000000 --- a/src/polus/tabular/_plugins/cwl/base.cwl +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env cwl-runner - -cwlVersion: v1.2 -class: CommandLineTool - -requirements: - DockerRequirement: - dockerPull: - InitialWorkDirRequirement: - listing: - - writable: true - entry: $(inputs.outDir) - InlineJavascriptRequirement: {} - -inputs: - -outputs: diff --git a/src/polus/tabular/_plugins/cwl/cwl.py b/src/polus/tabular/_plugins/cwl/cwl.py deleted file mode 100644 index 59a1163..0000000 --- a/src/polus/tabular/_plugins/cwl/cwl.py +++ /dev/null @@ -1,7 +0,0 @@ -from pathlib import Path - -import yaml # type: ignore - -PATH = Path(__file__) -with open(PATH.with_name("base.cwl"), "rb") as cwl_file: - CWL_BASE_DICT = yaml.full_load(cwl_file) diff --git a/src/polus/tabular/_plugins/gh.py b/src/polus/tabular/_plugins/gh.py deleted file mode 100644 index 791e0a7..0000000 --- a/src/polus/tabular/_plugins/gh.py +++ /dev/null @@ -1,65 +0,0 @@ -"""GitHub utilties.""" -import logging -import os -from urllib.parse import urljoin - -import github - -from polus.tabular._plugins.classes import submit_plugin - -logger = logging.getLogger("polus.tabular") - -""" -Initialize the Github interface -""" - - -def _init_github(auth=None): - if auth is None: - # Try to get an auth key from an environment variable - auth = os.environ.get("GITHUB_AUTH", None) - - if auth is None: - gh = github.Github() - logger.warning("Initialized Github 
connection with no user token.") - return gh - else: - logger.debug("Found auth token in GITHUB_AUTH environment variable.") - - else: - logger.debug("Github auth token supplied as input.") - - gh = github.Github(login_or_token=auth) - logger.debug( - f"Initialized Github connection with token for user: {gh.get_user().login}" - ) - - return gh - - -def add_plugin_from_gh( - user: str, - branch: str, - plugin: str, - repo: str = "polus-plugins", - manifest_name: str = "plugin.json", -): - """Add plugin from GitHub. - - This function adds a plugin hosted on GitHub and returns a Plugin object. - - Args: - user: GitHub username - branch: GitHub branch - plugin: Plugin's name - repo: Name of GitHub repository, default is `polus-plugins` - manifest_name: Name of manifest file, default is `plugin.json` - - Returns: - A Plugin object populated with information from the plugin manifest. - """ - l1 = [user, repo, branch, plugin, manifest_name] - u = "/".join(l1) - url = urljoin("https://raw.githubusercontent.com", u) - logger.info("Adding %s" % url) - return submit_plugin(url, refresh=True) diff --git a/src/polus/tabular/_plugins/io/__init__.py b/src/polus/tabular/_plugins/io/__init__.py deleted file mode 100644 index 0687084..0000000 --- a/src/polus/tabular/_plugins/io/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -"""Init IO module.""" - -from polus.tabular._plugins.io._io import Input -from polus.tabular._plugins.io._io import IOBase -from polus.tabular._plugins.io._io import Output -from polus.tabular._plugins.io._io import Version -from polus.tabular._plugins.io._io import input_to_cwl -from polus.tabular._plugins.io._io import io_to_yml -from polus.tabular._plugins.io._io import output_to_cwl -from polus.tabular._plugins.io._io import outputs_cwl - -__all__ = [ - "Input", - "Output", - "IOBase", - "Version", - "io_to_yml", - "outputs_cwl", - "input_to_cwl", - "output_to_cwl", -] diff --git a/src/polus/tabular/_plugins/io/_io.py b/src/polus/tabular/_plugins/io/_io.py 
deleted file mode 100644 index aa44cf9..0000000 --- a/src/polus/tabular/_plugins/io/_io.py +++ /dev/null @@ -1,597 +0,0 @@ -# type: ignore -# ruff: noqa: S101, A003 -# pylint: disable=no-self-argument, C0412 -"""Plugins I/O utilities.""" -import enum -import logging -import pathlib -import re -from functools import singledispatch -from functools import singledispatchmethod -from typing import Any -from typing import Optional -from typing import TypeVar -from typing import Union - -import fsspec -from polus.tabular._plugins._compat import PYDANTIC_V2 -from pydantic import BaseModel -from pydantic import Field -from pydantic import PrivateAttr - -if PYDANTIC_V2: - from typing import Annotated - - from pydantic import RootModel - from pydantic import StringConstraints - from pydantic import field_validator -else: - from pydantic import constr - from pydantic import validator - -logger = logging.getLogger("polus.tabular") - -""" -Enums for validating plugin input, output, and ui components. 
-""" -WIPP_TYPES = { - "collection": pathlib.Path, - "pyramid": pathlib.Path, - "csvCollection": pathlib.Path, - "genericData": pathlib.Path, - "stitchingVector": pathlib.Path, - "notebook": pathlib.Path, - "tensorflowModel": pathlib.Path, - "tensorboardLogs": pathlib.Path, - "pyramidAnnotation": pathlib.Path, - "integer": int, - "number": float, - "string": str, - "boolean": bool, - "array": str, - "enum": enum.Enum, - "path": pathlib.Path, -} - - -class InputTypes(str, enum.Enum): # wipp schema - """Enum of Input Types for WIPP schema.""" - - COLLECTION = "collection" - PYRAMID = "pyramid" - CSVCOLLECTION = "csvCollection" - GENERICDATA = "genericData" - STITCHINGVECTOR = "stitchingVector" - NOTEBOOK = "notebook" - TENSORFLOWMODEL = "tensorflowModel" - TENSORBOARDLOGS = "tensorboardLogs" - PYRAMIDANNOTATION = "pyramidAnnotation" - INTEGER = "integer" - NUMBER = "number" - STRING = "string" - BOOLEAN = "boolean" - ARRAY = "array" - ENUM = "enum" - - -class OutputTypes(str, enum.Enum): # wipp schema - """Enum for Output Types for WIPP schema.""" - - COLLECTION = "collection" - PYRAMID = "pyramid" - CSVCOLLECTION = "csvCollection" - GENERICDATA = "genericData" - STITCHINGVECTOR = "stitchingVector" - NOTEBOOK = "notebook" - TENSORFLOWMODEL = "tensorflowModel" - TENSORBOARDLOGS = "tensorboardLogs" - PYRAMIDANNOTATION = "pyramidAnnotation" - - -def _in_old_to_new(old: str) -> str: # map wipp InputType to compute schema's InputType - """Map an InputType from wipp schema to one of compute schema.""" - d = {"integer": "number", "enum": "string"} - if old in ["string", "array", "number", "boolean"]: - return old - if old in d: - return d[old] # integer or enum - return "path" # everything else - - -def _ui_old_to_new(old: str) -> str: # map wipp InputType to compute schema's UIType - """Map an InputType from wipp schema to a UIType of compute schema.""" - type_dict = { - "string": "text", - "boolean": "checkbox", - "number": "number", - "array": "text", - "integer": 
"number", - } - if old in type_dict: - return type_dict[old] - return "text" - - -FileSystem = TypeVar("FileSystem", bound=fsspec.spec.AbstractFileSystem) - - -class IOBase(BaseModel): # pylint: disable=R0903 - """Base Class for I/O arguments.""" - - type: Any = None - options: Optional[dict] = None - value: Optional[Any] = None - id_: Optional[Any] = None - _fs: Optional[FileSystem] = PrivateAttr( - default=None, - ) # type checking is done at plugin level - - def _validate(self) -> None: # pylint: disable=R0912 - value = self.value - - if value is None: - if self.required: - msg = f""" - The input value ({self.name}) is required, - but the value was not set.""" - raise TypeError( - msg, - ) - - return - - if self.type == InputTypes.ENUM: - try: - if isinstance(value, str): - value = enum.Enum(self.name, self.options["values"])[value] - elif not isinstance(value, enum.Enum): - raise ValueError - - except KeyError: - logging.error( - f""" - Value ({value}) is not a valid value - for the enum input ({self.name}). - Must be one of {self.options['values']}. 
- """, - ) - raise - else: - if isinstance(self.type, (InputTypes, OutputTypes)): # wipp - value = WIPP_TYPES[self.type](value) - else: - value = WIPP_TYPES[self.type.value]( - value, - ) # compute, type does not inherit from str - - if isinstance(value, pathlib.Path): - value = value.absolute() - if self._fs: - assert self._fs.exists( - str(value), - ), f"{value} is invalid or does not exist" - assert self._fs.isdir( - str(value), - ), f"{value} is not a valid directory" - else: - assert value.exists(), f"{value} is invalid or does not exist" - assert value.is_dir(), f"{value} is not a valid directory" - - super().__setattr__("value", value) - - def __setattr__(self, name: str, value: Any) -> None: # ruff: noqa: ANN401 - """Set I/O attributes.""" - if name not in ["value", "id", "_fs"]: - # Don't permit any other values to be changed - msg = f"Cannot set property: {name}" - raise TypeError(msg) - - super().__setattr__(name, value) - - if name == "value": - self._validate() - - -class Output(IOBase): # pylint: disable=R0903 - """Required until JSON schema is fixed.""" - - if PYDANTIC_V2: - name: Annotated[ - str, - StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), - ] = Field( - ..., - examples=["outputCollection"], - title="Output name", - ) - description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( - ..., - examples=["Output collection"], - title="Output description", - ) - else: - name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( - ..., - examples=["outputCollection"], - title="Output name", - ) - description: constr(regex=r"^(.*)$") = Field( - ..., - examples=["Output collection"], - title="Output description", - ) - type: OutputTypes = Field( - ..., - examples=["stitchingVector", "collection"], - title="Output type", - ) - - -class Input(IOBase): # pylint: disable=R0903 - """Required until JSON schema is fixed.""" - - if PYDANTIC_V2: - name: Annotated[ - str, - StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), - ] = 
Field( - ..., - description="Input name as expected by the plugin CLI", - examples=["inputImages", "fileNamePattern", "thresholdValue"], - title="Input name", - ) - description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( - ..., - examples=["Input Images"], - title="Input description", - ) - else: - name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( - ..., - description="Input name as expected by the plugin CLI", - examples=["inputImages", "fileNamePattern", "thresholdValue"], - title="Input name", - ) - description: constr(regex=r"^(.*)$") = Field( - ..., - examples=["Input Images"], - title="Input description", - ) - type: InputTypes - required: Optional[bool] = Field( - True, - description="Whether an input is required or not", - examples=[True], - title="Required input", - ) - - def __init__(self, **data) -> None: # ruff: noqa: ANN003 - """Initialize input.""" - super().__init__(**data) - - if self.description is None: - logger.warning( - f""" - The input ({self.name}) is missing the description field. - This field is not required but should be filled in. - """, - ) - - -def _check_version_number(value: Union[str, int]) -> bool: - if isinstance(value, int): - value = str(value) - if "-" in value: - value = value.split("-")[0] - if len(value) > 1 and value[0] == "0": - return False - return bool(re.match(r"^\d+$", value)) - - -if PYDANTIC_V2: - - class Version(RootModel): - """SemVer object.""" - - root: str - - @field_validator("root") - @classmethod - def semantic_version( - cls, - value, - ) -> Any: # ruff: noqa: ANN202, N805, ANN001 - """Pydantic Validator to check semver.""" - version = value.split(".") - - assert ( - len(version) == 3 # ruff: noqa: PLR2004 - ), f""" - Invalid version ({value}). 
Version must follow - semantic versioning (see semver.org)""" - if "-" in version[-1]: # with hyphen - idn = version[-1].split("-")[-1] - id_reg = re.compile("[0-9A-Za-z-]+") - assert bool( - id_reg.match(idn), - ), f"""Invalid version ({value}). - Version must follow semantic versioning (see semver.org)""" - - assert all( - map(_check_version_number, version), - ), f"""Invalid version ({value}). - Version must follow semantic versioning (see semver.org)""" - return value - - @property - def major(self): - """Return x from x.y.z .""" - return int(self.root.split(".")[0]) - - @property - def minor(self): - """Return y from x.y.z .""" - return int(self.root.split(".")[1]) - - @property - def patch(self): - """Return z from x.y.z .""" - if not self.root.split(".")[2].isdigit(): - msg = "Patch version is not a digit, comparison may not be accurate." - logger.warning(msg) - return self.root.split(".")[2] - return int(self.root.split(".")[2]) - - def __str__(self) -> str: - """Return string representation of Version object.""" - return self.root - - @singledispatchmethod - def __lt__(self, other: Any) -> bool: - """Compare if Version is less than other object.""" - msg = "invalid type for comparison." - raise TypeError(msg) - - @singledispatchmethod - def __gt__(self, other: Any) -> bool: - """Compare if Version is less than other object.""" - msg = "invalid type for comparison." - raise TypeError(msg) - - @singledispatchmethod - def __eq__(self, other: Any) -> bool: - """Compare if two Version objects are equal.""" - msg = "invalid type for comparison." 
- raise TypeError(msg) - - def __hash__(self) -> int: - """Needed to use Version objects as dict keys.""" - return hash(self.root) - - def __repr__(self) -> str: - """Return string representation of Version object.""" - return self.root - - @Version.__eq__.register(str) # pylint: disable=no-member - def _(self, other): - return self == Version(other) - - @Version.__lt__.register(str) # pylint: disable=no-member - def _(self, other): - v = Version(other) - return self < v - - @Version.__gt__.register(str) # pylint: disable=no-member - def _(self, other): - v = Version(other) - return self > v - -else: # PYDANTIC_V1 - - class Version(BaseModel): - """SemVer object.""" - - version: str - - def __init__(self, version: str) -> None: - """Initialize Version object.""" - super().__init__(version=version) - - @validator("version") - def semantic_version( - cls, - value, - ): # ruff: noqa: ANN202, N805, ANN001 - """Pydantic Validator to check semver.""" - version = value.split(".") - - assert ( - len(version) == 3 # ruff: noqa: PLR2004 - ), f""" - Invalid version ({value}). Version must follow - semantic versioning (see semver.org)""" - if "-" in version[-1]: # with hyphen - idn = version[-1].split("-")[-1] - id_reg = re.compile("[0-9A-Za-z-]+") - assert bool( - id_reg.match(idn), - ), f"""Invalid version ({value}). - Version must follow semantic versioning (see semver.org)""" - - assert all( - map(_check_version_number, version), - ), f"""Invalid version ({value}). - Version must follow semantic versioning (see semver.org)""" - return value - - @property - def major(self): - """Return x from x.y.z .""" - return int(self.version.split(".")[0]) - - @property - def minor(self): - """Return y from x.y.z .""" - return int(self.version.split(".")[1]) - - @property - def patch(self): - """Return z from x.y.z .""" - if not self.version.split(".")[2].isdigit(): - msg = "Patch version is not a digit, comparison may not be accurate." 
- logger.warning(msg) - return self.version.split(".")[2] - return int(self.version.split(".")[2]) - - def __str__(self) -> str: - """Return string representation of Version object.""" - return self.version - - @singledispatchmethod - def __lt__(self, other: Any) -> bool: - """Compare if Version is less than other object.""" - msg = "invalid type for comparison." - raise TypeError(msg) - - @singledispatchmethod - def __gt__(self, other: Any) -> bool: - """Compare if Version is less than other object.""" - msg = "invalid type for comparison." - raise TypeError(msg) - - @singledispatchmethod - def __eq__(self, other: Any) -> bool: - """Compare if two Version objects are equal.""" - msg = "invalid type for comparison." - raise TypeError(msg) - - def __hash__(self) -> int: - """Needed to use Version objects as dict keys.""" - return hash(self.version) - - @Version.__eq__.register(str) # pylint: disable=no-member - def _(self, other): - return self == Version(**{"version": other}) - - @Version.__lt__.register(str) # pylint: disable=no-member - def _(self, other): - v = Version(**{"version": other}) - return self < v - - @Version.__gt__.register(str) # pylint: disable=no-member - def _(self, other): - v = Version(**{"version": other}) - return self > v - - -@Version.__eq__.register(Version) # pylint: disable=no-member -def _(self, other): - return ( - other.major == self.major - and other.minor == self.minor - and other.patch == self.patch - ) - - -@Version.__lt__.register(Version) # pylint: disable=no-member -def _(self, other): - if other.major > self.major: - return True - if other.major == self.major: - if other.minor > self.minor: - return True - if other.minor == self.minor: - if other.patch > self.patch: - return True - return False - return False - return False - - -@Version.__gt__.register(Version) # pylint: disable=no-member -def _(self, other): - return other < self - - -class DuplicateVersionFoundError(Exception): - """Raise when two equal versions found.""" 
- - -CWL_INPUT_TYPES = { - "path": "Directory", # always Dir? Yes - "string": "string", - "number": "double", - "boolean": "boolean", - "genericData": "Directory", - "collection": "Directory", - "enum": "string", # for compatibility with workflows - "stitchingVector": "Directory", - "integer": "long", - # not yet implemented: array -} - - -def _type_in(inp: Input): - """Return appropriate value for `type` based on input type.""" - val = inp.type.value - req = "" if inp.required else "?" - - # NOT compatible with CWL workflows, ok in CLT - # if val == "enum": - # if input.required: - - # if val in CWL_INPUT_TYPES: - return CWL_INPUT_TYPES[val] + req if val in CWL_INPUT_TYPES else "string" + req - - -def input_to_cwl(inp: Input): - """Return dict of inputs for cwl.""" - return { - f"{inp.name}": { - "type": _type_in(inp), - "inputBinding": {"prefix": f"--{inp.name}"}, - }, - } - - -def output_to_cwl(out: Output): - """Return dict of output args for cwl for input section.""" - return { - f"{out.name}": { - "type": "Directory", - "inputBinding": {"prefix": f"--{out.name}"}, - }, - } - - -def outputs_cwl(out: Output): - """Return dict of output for `outputs` in cwl.""" - return { - f"{out.name}": { - "type": "Directory", - "outputBinding": {"glob": f"$(inputs.{out.name}.basename)"}, - }, - } - - -# -- I/O as arguments in .yml - - -@singledispatch -def _io_value_to_yml(io) -> Union[str, dict]: - return str(io) - - -@_io_value_to_yml.register -def _(io: pathlib.Path): - return {"class": "Directory", "location": str(io)} - - -@_io_value_to_yml.register -def _(io: enum.Enum): - return io.name - - -def io_to_yml(io): - """Return IO entry for yml file.""" - return _io_value_to_yml(io.value) diff --git a/src/polus/tabular/_plugins/manifests/__init__.py b/src/polus/tabular/_plugins/manifests/__init__.py deleted file mode 100644 index 5854298..0000000 --- a/src/polus/tabular/_plugins/manifests/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -"""Initialize manifests module.""" - -from 
polus.tabular._plugins.manifests.manifest_utils import InvalidManifestError -from polus.tabular._plugins.manifests.manifest_utils import _error_log -from polus.tabular._plugins.manifests.manifest_utils import _load_manifest -from polus.tabular._plugins.manifests.manifest_utils import _scrape_manifests -from polus.tabular._plugins.manifests.manifest_utils import validate_manifest - -__all__ = [ - "InvalidManifestError", - "_load_manifest", - "validate_manifest", - "_error_log", - "_scrape_manifests", -] diff --git a/src/polus/tabular/_plugins/manifests/manifest_utils.py b/src/polus/tabular/_plugins/manifests/manifest_utils.py deleted file mode 100644 index 6a5c5f8..0000000 --- a/src/polus/tabular/_plugins/manifests/manifest_utils.py +++ /dev/null @@ -1,210 +0,0 @@ -"""Utilities for manifest parsing and validation.""" -import json -import logging -import pathlib -from typing import Optional -from typing import Union - -import github -import requests -import validators -from polus.tabular._plugins._compat import PYDANTIC_V2 -from polus.tabular._plugins.models import ComputeSchema -from polus.tabular._plugins.models import WIPPPluginManifest -from pydantic import ValidationError -from pydantic import errors -from tqdm import tqdm - -if not PYDANTIC_V2: - from polus.tabular._plugins.utils import cast_version - -logger = logging.getLogger("polus.tabular") - -# Fields that must be in a plugin manifest -REQUIRED_FIELDS = [ - "name", - "version", - "description", - "author", - "containerId", - "inputs", - "outputs", - "ui", -] - - -class InvalidManifestError(Exception): - """Raised when manifest has validation errors.""" - - -def is_valid_manifest(plugin: dict) -> bool: - """Validate basic attributes of a plugin manifest. 
- - Args: - plugin: A parsed plugin json file - - Returns: - True if the plugin has the minimal json fields - """ - fields = list(plugin.keys()) - - for field in REQUIRED_FIELDS: - if field not in fields: - msg = f"Missing json field, {field}, in plugin manifest." - logger.error(msg) - return False - return True - - -def _load_manifest(manifest: Union[str, dict, pathlib.Path]) -> dict: - """Return manifest as dict from str (url or path) or pathlib.Path.""" - if isinstance(manifest, dict): # is dict - return manifest - if isinstance(manifest, pathlib.Path): # is path - if manifest.suffix != ".json": - msg = "plugin manifest must be a json file with .json extension." - raise ValueError(msg) - - with manifest.open("r", encoding="utf-8") as manifest_json: - manifest_ = json.load(manifest_json) - elif isinstance(manifest, str): # is str - if validators.url(manifest): # is url - manifest_ = requests.get(manifest, timeout=10).json() - else: # could (and should) be path - try: - manifest_ = _load_manifest(pathlib.Path(manifest)) - except Exception as exc: # was not a Path? 
# noqa - msg = "invalid manifest" - raise ValueError(msg) from exc - else: # is not str, dict, or path - msg = f"invalid manifest type {type(manifest)}" - raise ValueError(msg) - return manifest_ - - -def validate_manifest( - manifest: Union[str, dict, pathlib.Path], -) -> Union[WIPPPluginManifest, ComputeSchema]: - """Validate a plugin manifest against schema.""" - manifest = _load_manifest(manifest) - if not PYDANTIC_V2: # Pydantic V1 - manifest["version"] = cast_version( - manifest["version"], - ) # cast version to semver object - if "name" in manifest: - name = manifest["name"] - else: - msg = f"{manifest} has no value for name" - raise InvalidManifestError(msg) - - if "pluginHardwareRequirements" in manifest: - # Parse the manifest - try: - plugin = ComputeSchema(**manifest) - except ValidationError as e: - msg = f"{name} does not conform to schema" - raise InvalidManifestError(msg) from e - except BaseException as e: - raise e - else: - # Parse the manifest - try: - plugin = WIPPPluginManifest(**manifest) - except ValidationError as e: - msg = f"{manifest['name']} does not conform to schema" - raise InvalidManifestError( - msg, - ) from e - except BaseException as e: - raise e - return plugin - - -def _scrape_manifests( - repo: Union[str, github.Repository.Repository], # type: ignore - gh: github.Github, - min_depth: int = 1, - max_depth: Optional[int] = None, - return_invalid: bool = False, -) -> Union[list, tuple[list, list]]: - if max_depth is None: - max_depth = min_depth - min_depth = 0 - - if not max_depth >= min_depth: - msg = "max_depth is smaller than min_depth" - raise ValueError(msg) - - if isinstance(repo, str): - repo = gh.get_repo(repo) - - contents = list(repo.get_contents("")) # type: ignore - next_contents: list = [] - valid_manifests: list = [] - invalid_manifests: list = [] - - for d in range(0, max_depth): - for content in tqdm(contents, desc=f"{repo.full_name}: {d}"): - if content.type == "dir": - 
next_contents.extend(repo.get_contents(content.path)) # type: ignore - elif content.name.endswith(".json") and d >= min_depth: - manifest = json.loads(content.decoded_content) - if is_valid_manifest(manifest): - valid_manifests.append(manifest) - else: - invalid_manifests.append(manifest) - - contents = next_contents.copy() - next_contents = [] - - if return_invalid: - return valid_manifests, invalid_manifests - return valid_manifests - - -def _error_log(val_err: ValidationError, manifest: dict, fct: str) -> None: - report = [] - - for error in val_err.args[0]: - if isinstance(error, list): - error = error[0] # noqa - - if isinstance(error, AssertionError): - msg = ( - f"The plugin ({manifest['name']}) " - "failed an assertion check: {err.args[0]}" - ) - report.append(msg) - logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 - elif isinstance(error.exc, errors.MissingError): - msg = ( - f"The plugin ({manifest['name']}) " - "is missing fields: {err.loc_tuple()}" - ) - report.append(msg) - logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 - elif errors.ExtraError: - if error.loc_tuple()[0] in ["inputs", "outputs", "ui"]: - manifest_ = manifest[error.loc_tuple()[0]][error.loc_tuple()[1]]["name"] - msg = ( - f"The plugin ({manifest['name']}) " - "had unexpected values in the " - f"{error.loc_tuple()[0]} " - f"({manifest_}): " - f"{error.exc.args[0][0].loc_tuple()}" - ) - report.append(msg) - else: - msg = ( - f"The plugin ({manifest['name']}) " - "had an error: {err.exc.args[0][0]}" - ) - report.append(msg) - logger.critical(f"{fct}: {report[-1]}") # pylint: disable=W1203 - else: - str_val_err = str(val_err).replace("\n", ", ").replace(" ", " ") - msg = ( - f"{fct}: Uncaught manifest error in ({manifest['name']}): " - f"{str_val_err}" - ) - logger.warning(msg) diff --git a/src/polus/tabular/_plugins/models/PolusComputeSchema.json b/src/polus/tabular/_plugins/models/PolusComputeSchema.json deleted file mode 100644 index d4875d5..0000000 --- 
a/src/polus/tabular/_plugins/models/PolusComputeSchema.json +++ /dev/null @@ -1,499 +0,0 @@ -{ - "definitions": { - "PluginInputType": { - "title": "PluginInputType", - "description": "An enumeration.", - "enum": [ - "path", - "string", - "number", - "array", - "boolean" - ] - }, - "PluginInput": { - "title": "PluginInput", - "type": "object", - "properties": { - "format": { - "title": "Format", - "type": "string" - }, - "label": { - "title": "Label", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "required": { - "title": "Required", - "type": "boolean" - }, - "type": { - "$ref": "#/definitions/PluginInputType" - }, - "default": { - "title": "Default", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "boolean" - } - ] - } - }, - "required": [ - "name", - "required", - "type" - ] - }, - "PluginOutputType": { - "title": "PluginOutputType", - "description": "An enumeration.", - "enum": [ - "path" - ], - "type": "string" - }, - "PluginOutput": { - "title": "PluginOutput", - "type": "object", - "properties": { - "format": { - "title": "Format", - "type": "string" - }, - "label": { - "title": "Label", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "type": { - "$ref": "#/definitions/PluginOutputType" - } - }, - "required": [ - "name", - "type" - ] - }, - "GpuVendor": { - "title": "GpuVendor", - "description": "An enumeration.", - "enum": [ - "none", - "amd", - "tpu", - "nvidia" - ], - "type": "string" - }, - "PluginHardwareRequirements": { - "title": "PluginHardwareRequirements", - "type": "object", - "properties": { - "coresMax": { - "title": "Coresmax", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "coresMin": { - "title": "Coresmin", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "cpuAVX": { - "title": "Cpuavx", - "type": "boolean" - }, - "cpuAVX2": { - "title": "Cpuavx2", - "type": "boolean" - }, - 
"cpuMin": { - "title": "Cpumin", - "type": "string" - }, - "gpu": { - "$ref": "#/definitions/GpuVendor" - }, - "gpuCount": { - "title": "Gpucount", - "type": "number" - }, - "gpuDriverVersion": { - "title": "Gpudriverversion", - "type": "string" - }, - "gpuType": { - "title": "Gputype", - "type": "string" - }, - "outDirMax": { - "title": "Outdirmax", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "outDirMin": { - "title": "Outdirmin", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "ramMax": { - "title": "Rammax", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "ramMin": { - "title": "Rammin", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "tmpDirMax": { - "title": "Tmpdirmax", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - }, - "tmpDirMin": { - "title": "Tmpdirmin", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - } - ] - } - } - }, - "ThenEntry": { - "title": "ThenEntry", - "type": "object", - "properties": { - "action": { - "title": "Action", - "type": "string" - }, - "input": { - "title": "Input", - "type": "string" - }, - "value": { - "title": "Value", - "type": "string" - } - }, - "required": [ - "action", - "input", - "value" - ] - }, - "ConditionEntry": { - "title": "ConditionEntry", - "type": "object", - "properties": { - "expression": { - "title": "Expression", - "type": "string" - } - }, - "required": [ - "expression" - ] - }, - "Validator": { - "title": "Validator", - "type": "object", - "properties": { - "then": { - "title": "Then", - "type": "array", - "items": { - "$ref": "#/definitions/ThenEntry" - } - }, - "validator": { - "title": "Validator", - "type": "array", - "items": { - "$ref": "#/definitions/ConditionEntry" - } - } - } - }, - "PluginUIType": { - "title": "PluginUIType", - "description": "An enumeration.", - "enum": [ - "checkbox", - "color", - "date", - "email", - 
"number", - "password", - "radio", - "range", - "text", - "time" - ] - }, - "PluginUIInput": { - "title": "PluginUIInput", - "type": "object", - "properties": { - "bind": { - "title": "Bind", - "type": "string" - }, - "condition": { - "title": "Condition", - "anyOf": [ - { - "type": "array", - "items": { - "$ref": "#/definitions/Validator" - } - }, - { - "type": "string" - } - ] - }, - "default": { - "title": "Default", - "anyOf": [ - { - "type": "string" - }, - { - "type": "number" - }, - { - "type": "boolean" - } - ] - }, - "description": { - "title": "Description", - "type": "string" - }, - "fieldset": { - "title": "Fieldset", - "type": "array", - "items": { - "type": "string" - } - }, - "hidden": { - "title": "Hidden", - "type": "boolean" - }, - "key": { - "title": "Key", - "type": "string" - }, - "title": { - "title": "Title", - "type": "string" - }, - "type": { - "$ref": "#/definitions/PluginUIType" - } - }, - "required": [ - "key", - "title", - "type" - ] - }, - "PluginUIOutput": { - "title": "PluginUIOutput", - "type": "object", - "properties": { - "description": { - "title": "Description", - "type": "string" - }, - "format": { - "title": "Format", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "type": { - "$ref": "#/definitions/PluginUIType" - }, - "website": { - "title": "Website", - "type": "string" - } - }, - "required": [ - "description", - "name", - "type" - ] - }, - "PluginSchema": { - "title": "PluginSchema", - "type": "object", - "properties": { - "author": { - "title": "Author", - "type": "string" - }, - "baseCommand": { - "title": "Basecommand", - "type": "array", - "items": { - "type": "string" - } - }, - "citation": { - "title": "Citation", - "type": "string" - }, - "containerId": { - "title": "Containerid", - "type": "string" - }, - "customInputs": { - "title": "Custominputs", - "type": "boolean" - }, - "description": { - "title": "Description", - "type": "string" - }, - "inputs": { - "title": "Inputs", - 
"type": "array", - "items": { - "$ref": "#/definitions/PluginInput" - } - }, - "institution": { - "title": "Institution", - "type": "string" - }, - "name": { - "title": "Name", - "type": "string" - }, - "outputs": { - "title": "Outputs", - "type": "array", - "items": { - "$ref": "#/definitions/PluginOutput" - } - }, - "pluginHardwareRequirements": { - "$ref": "#/definitions/PluginHardwareRequirements" - }, - "repository": { - "title": "Repository", - "type": "string" - }, - "title": { - "title": "Title", - "type": "string" - }, - "ui": { - "title": "Ui", - "type": "array", - "items": { - "anyOf": [ - { - "$ref": "#/definitions/PluginUIInput" - }, - { - "$ref": "#/definitions/PluginUIOutput" - } - ] - } - }, - "version": { - "title": "Version", - "examples": [ - "0.1.0", - "0.1.0rc1" - ], - "pattern": "^(0|[1-9]\\d*)\\.(0|[1-9]\\d*)\\.(0|[1-9]\\d*)(?:-((?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\\.(?:0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\\+([0-9a-zA-Z-]+(?:\\.[0-9a-zA-Z-]+)*))?$", - "type": "string" - }, - "website": { - "title": "Website", - "type": "string" - } - }, - "required": [ - "containerId", - "description", - "inputs", - "name", - "outputs", - "pluginHardwareRequirements", - "title", - "ui", - "version" - ] - } - } -} diff --git a/src/polus/tabular/_plugins/models/PolusComputeSchema.ts b/src/polus/tabular/_plugins/models/PolusComputeSchema.ts deleted file mode 100644 index 184ebbf..0000000 --- a/src/polus/tabular/_plugins/models/PolusComputeSchema.ts +++ /dev/null @@ -1,102 +0,0 @@ -/* tslint:disable */ -/* eslint-disable */ -/** -/* This file was automatically generated from pydantic models by running pydantic2ts. 
-/* Do not modify it by hand - just update the pydantic models and then re-run the script -*/ - -export type GpuVendor = "none" | "amd" | "tpu" | "nvidia"; -export type PluginInputType = "path" | "string" | "number" | "array" | "boolean"; -export type PluginOutputType = "path"; -export type PluginUIType = - | "checkbox" - | "color" - | "date" - | "email" - | "number" - | "password" - | "radio" - | "range" - | "text" - | "time"; - -export interface ConditionEntry { - expression: string; -} -export interface Model {} -export interface PluginHardwareRequirements { - coresMax?: string | number; - coresMin?: string | number; - cpuAVX?: boolean; - cpuAVX2?: boolean; - cpuMin?: string; - gpu?: GpuVendor; - gpuCount?: number; - gpuDriverVersion?: string; - gpuType?: string; - outDirMax?: string | number; - outDirMin?: string | number; - ramMax?: string | number; - ramMin?: string | number; - tmpDirMax?: string | number; - tmpDirMin?: string | number; -} -export interface PluginInput { - format?: string; - label?: string; - name: string; - required: boolean; - type: PluginInputType; - default?: string | number | boolean; -} -export interface PluginOutput { - format?: string; - label?: string; - name: string; - type: PluginOutputType; -} -export interface PluginSchema { - author?: string; - baseCommand?: string[]; - citation?: string; - containerId: string; - customInputs?: boolean; - description: string; - inputs: PluginInput[]; - institution?: string; - name: string; - outputs: PluginOutput[]; - pluginHardwareRequirements: PluginHardwareRequirements; - repository?: string; - title: string; - ui: (PluginUIInput | PluginUIOutput)[]; - version: string; - website?: string; -} -export interface PluginUIInput { - bind?: string; - condition?: Validator[] | string; - default?: string | number | boolean; - description?: string; - fieldset?: string[]; - hidden?: boolean; - key: string; - title: string; - type: PluginUIType; -} -export interface Validator { - then?: ThenEntry[]; - 
validator?: ConditionEntry[]; -} -export interface ThenEntry { - action: string; - input: string; - value: string; -} -export interface PluginUIOutput { - description: string; - format?: string; - name: string; - type: PluginUIType; - website?: string; -} diff --git a/src/polus/tabular/_plugins/models/__init__.py b/src/polus/tabular/_plugins/models/__init__.py deleted file mode 100644 index 55f3558..0000000 --- a/src/polus/tabular/_plugins/models/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -"""Pydantic Models based on JSON schemas.""" - -import pydantic - -PYDANTIC_VERSION = pydantic.__version__ - -if PYDANTIC_VERSION.split(".")[0] == "1": - from polus.tabular._plugins.models.pydanticv1.compute import ( - PluginSchema as ComputeSchema, - ) - from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import ( - PluginUIInput, - ) - from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import ( - PluginUIOutput, - ) - from polus.tabular._plugins.models.pydanticv1.wipp import WIPPPluginManifest -elif PYDANTIC_VERSION.split(".")[0] == "2": - from polus.tabular._plugins.models.pydanticv2.compute import ( - PluginSchema as ComputeSchema, - ) - from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import ( - PluginUIInput, - ) - from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import ( - PluginUIOutput, - ) - from polus.tabular._plugins.models.pydanticv2.wipp import WIPPPluginManifest - -__all__ = [ - "WIPPPluginManifest", - "PluginUIInput", - "PluginUIOutput", - "ComputeSchema", -] diff --git a/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py b/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py deleted file mode 100644 index a40b5b4..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv1/PolusComputeSchema.py +++ /dev/null @@ -1,137 +0,0 @@ -# generated by datamodel-codegen: -# timestamp: 2022-09-21T03:41:58+00:00 - -from __future__ import annotations - -from enum import Enum -from typing 
import Any - -from pydantic import BaseModel -from pydantic import Field -from pydantic import constr - - -class Model(BaseModel): - __root__: Any - - -class PluginInputType(Enum): - path = "path" - string = "string" - number = "number" - array = "array" - boolean = "boolean" - - -class PluginInput(BaseModel): - format: str | None = Field(None, title="Format") - label: str | None = Field(None, title="Label") - name: str = Field(..., title="Name") - required: bool = Field(..., title="Required") - type: PluginInputType - default: str | float | bool | None = Field(None, title="Default") - - -class PluginOutputType(Enum): - path = "path" - - -class PluginOutput(BaseModel): - format: str | None = Field(None, title="Format") - label: str | None = Field(None, title="Label") - name: str = Field(..., title="Name") - type: PluginOutputType - - -class GpuVendor(Enum): - none = "none" - amd = "amd" - tpu = "tpu" - nvidia = "nvidia" - - -class PluginHardwareRequirements(BaseModel): - coresMax: str | float | None = Field(None, title="Coresmax") - coresMin: str | float | None = Field(None, title="Coresmin") - cpuAVX: bool | None = Field(None, title="Cpuavx") - cpuAVX2: bool | None = Field(None, title="Cpuavx2") - cpuMin: str | None = Field(None, title="Cpumin") - gpu: GpuVendor | None = None - gpuCount: float | None = Field(None, title="Gpucount") - gpuDriverVersion: str | None = Field(None, title="Gpudriverversion") - gpuType: str | None = Field(None, title="Gputype") - outDirMax: str | float | None = Field(None, title="Outdirmax") - outDirMin: str | float | None = Field(None, title="Outdirmin") - ramMax: str | float | None = Field(None, title="Rammax") - ramMin: str | float | None = Field(None, title="Rammin") - tmpDirMax: str | float | None = Field(None, title="Tmpdirmax") - tmpDirMin: str | float | None = Field(None, title="Tmpdirmin") - - -class ThenEntry(BaseModel): - action: str = Field(..., title="Action") - input: str = Field(..., title="Input") - value: str = Field(..., 
title="Value") - - -class ConditionEntry(BaseModel): - expression: str = Field(..., title="Expression") - - -class Validator(BaseModel): - then: list[ThenEntry] | None = Field(None, title="Then") - validator: list[ConditionEntry] | None = Field(None, title="Validator") - - -class PluginUIType(Enum): - checkbox = "checkbox" - color = "color" - date = "date" - email = "email" - number = "number" - password = "password" - radio = "radio" - range = "range" - text = "text" - time = "time" - - -class PluginUIInput(BaseModel): - bind: str | None = Field(None, title="Bind") - condition: list[Validator] | str | None = Field(None, title="Condition") - default: str | float | bool | None = Field(None, title="Default") - description: str | None = Field(None, title="Description") - fieldset: list[str] | None = Field(None, title="Fieldset") - hidden: bool | None = Field(None, title="Hidden") - key: str = Field(..., title="Key") - title: str = Field(..., title="Title") - type: PluginUIType - - -class PluginUIOutput(BaseModel): - description: str = Field(..., title="Description") - format: str | None = Field(None, title="Format") - name: str = Field(..., title="Name") - type: PluginUIType - website: str | None = Field(None, title="Website") - - -class PluginSchema(BaseModel): - author: str | None = Field(None, title="Author") - baseCommand: list[str] | None = Field(None, title="Basecommand") - citation: str | None = Field(None, title="Citation") - containerId: str = Field(..., title="Containerid") - customInputs: bool | None = Field(None, title="Custominputs") - description: str = Field(..., title="Description") - inputs: list[PluginInput] = Field(..., title="Inputs") - institution: str | None = Field(None, title="Institution") - name: str = Field(..., title="Name") - outputs: list[PluginOutput] = Field(..., title="Outputs") - pluginHardwareRequirements: PluginHardwareRequirements - repository: str | None = Field(None, title="Repository") - title: str = Field(..., title="Title") - 
ui: list[PluginUIInput | PluginUIOutput] = Field(..., title="Ui") - version: constr( - regex=r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", - ) = Field(..., examples=["0.1.0", "0.1.0rc1"], title="Version") - website: str | None = Field(None, title="Website") diff --git a/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py b/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py deleted file mode 100644 index 718d3a3..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv1/WIPPPluginSchema.py +++ /dev/null @@ -1,233 +0,0 @@ -# generated by datamodel-codegen: -# timestamp: 2023-01-04T14:54:38+00:00 - -from __future__ import annotations - -from enum import Enum -from typing import Any - -from pydantic import AnyUrl -from pydantic import BaseModel -from pydantic import Field -from pydantic import constr - - -class Type(Enum): - collection = "collection" - stitchingVector = "stitchingVector" - tensorflowModel = "tensorflowModel" - csvCollection = "csvCollection" - pyramid = "pyramid" - pyramidAnnotation = "pyramidAnnotation" - notebook = "notebook" - genericData = "genericData" - string = "string" - number = "number" - integer = "integer" - enum = "enum" - array = "array" - boolean = "boolean" - - -class Input(BaseModel): - name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( - ..., - description="Input name as expected by the plugin CLI", - examples=["inputImages", "fileNamePattern", "thresholdValue"], - title="Input name", - ) - type: Type = Field( - ..., - examples=["collection", "string", "number"], - title="Input Type", - ) - description: constr(regex=r"^(.*)$") = Field( - ..., - examples=["Input Images"], - title="Input description", - ) - required: bool | None = Field( - True, - description="Whether an input is required or not", - examples=[True], - title="Required input", - ) - - -class 
Type1(Enum): - collection = "collection" - stitchingVector = "stitchingVector" - tensorflowModel = "tensorflowModel" - tensorboardLogs = "tensorboardLogs" - csvCollection = "csvCollection" - pyramid = "pyramid" - pyramidAnnotation = "pyramidAnnotation" - genericData = "genericData" - - -class Output(BaseModel): - name: constr(regex=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$") = Field( - ..., - examples=["outputCollection"], - title="Output name", - ) - type: Type1 = Field( - ..., - examples=["stitchingVector", "collection"], - title="Output type", - ) - description: constr(regex=r"^(.*)$") = Field( - ..., - examples=["Output collection"], - title="Output description", - ) - - -class UiItem(BaseModel): - key: Any | Any = Field( - ..., - description="Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", - examples=["inputs.inputImages", "inputs.fileNamPattern", "fieldsets"], - title="UI key", - ) - - -class CudaRequirements(BaseModel): - deviceMemoryMin: float | None = Field( - 0, - examples=[100], - title="Minimum device memory", - ) - cudaComputeCapability: str | list[Any] | None = Field( - None, - description="Specify either a single minimum value, or an array of valid values", - examples=["8.0", ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"]], - title="The cudaComputeCapability Schema", - ) - - -class ResourceRequirements(BaseModel): - ramMin: float | None = Field( - None, - examples=[2048], - title="Minimum RAM in mebibytes (Mi)", - ) - coresMin: float | None = Field( - None, - examples=[1], - title="Minimum number of CPU cores", - ) - cpuAVX: bool | None = Field( - False, - examples=[True], - title="Advanced Vector Extensions (AVX) CPU capability required", - ) - cpuAVX2: bool | None = Field( - False, - examples=[False], - title="Advanced Vector Extensions 2 (AVX2) CPU capability required", - ) - gpu: bool | None = Field( - False, - examples=[True], - 
title="GPU/accelerator required", - ) - cudaRequirements: CudaRequirements | None = Field( - {}, - examples=[{"deviceMemoryMin": 100, "cudaComputeCapability": "8.0"}], - title="GPU Cuda-related requirements", - ) - - -class WippPluginManifest(BaseModel): - name: constr(regex=r"^(.*)$", min_length=1) = Field( - ..., - description="Name of the plugin (format: org/name)", - examples=["wipp/plugin-example"], - title="Plugin name", - ) - version: constr(regex=r"^(.*)$", min_length=1) = Field( - ..., - description="Version of the plugin (semantic versioning preferred)", - examples=["1.0.0"], - title="Plugin version", - ) - title: constr(regex=r"^(.*)$", min_length=1) = Field( - ..., - description="Plugin title to display in WIPP forms", - examples=["WIPP Plugin example"], - title="Plugin title", - ) - description: constr(regex=r"^(.*)$", min_length=1) = Field( - ..., - examples=["Custom image segmentation plugin"], - title="Short description of the plugin", - ) - author: constr(regex="^(.*)$") | None | None = Field( - "", - examples=["FirstName LastName"], - title="Author(s)", - ) - institution: constr(regex="^(.*)$") | None | None = Field( - "", - examples=["National Institute of Standards and Technology"], - title="Institution", - ) - repository: AnyUrl | None | None = Field( - "", - examples=["https://github.com/usnistgov/WIPP"], - title="Source code repository", - ) - website: AnyUrl | None | None = Field( - "", - examples=["http://usnistgov.github.io/WIPP"], - title="Website", - ) - citation: constr(regex="^(.*)$") | None | None = Field( - "", - examples=[ - "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. 
Springer-Verlag International", - ], - title="Citation", - ) - containerId: constr(regex=r"^(.*)$") = Field( - ..., - description="Docker image ID", - examples=["docker.io/wipp/plugin-example:1.0.0"], - title="ContainerId", - ) - baseCommand: list[str] | None = Field( - None, - description="Base command to use while running container image", - examples=[["python3", "/opt/executable/main.py"]], - title="Base command", - ) - inputs: list[Input] = Field( - ..., - description="Defines inputs to the plugin", - title="List of Inputs", - unique_items=True, - ) - outputs: list[Output] = Field( - ..., - description="Defines the outputs of the plugin", - title="List of Outputs", - ) - ui: list[UiItem] = Field(..., title="Plugin form UI definition") - resourceRequirements: ResourceRequirements | None = Field( - {}, - examples=[ - { - "ramMin": 2048, - "coresMin": 1, - "cpuAVX": True, - "cpuAVX2": False, - "gpu": True, - "cudaRequirements": { - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0", - }, - }, - ], - title="Plugin Resource Requirements", - ) diff --git a/src/polus/tabular/_plugins/models/pydanticv1/__init__.py b/src/polus/tabular/_plugins/models/pydanticv1/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/polus/tabular/_plugins/models/pydanticv1/compute.py b/src/polus/tabular/_plugins/models/pydanticv1/compute.py deleted file mode 100644 index 86e2c27..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv1/compute.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Extending automatically generated compute model. - -This file modifies and extend certain fields and -functions of PolusComputeSchema.py which is automatically -generated by datamodel-codegen from JSON schema. 
-""" - -from polus.tabular._plugins.io import IOBase -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginInput -from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginOutput -from polus.tabular._plugins.models.pydanticv1.PolusComputeSchema import PluginSchema - - -class PluginInput(PluginInput, IOBase): # type: ignore - """Base Class for Input Args.""" - - -class PluginOutput(PluginOutput, IOBase): # type: ignore - """Base Class for Output Args.""" - - -class PluginSchema(PluginSchema): # type: ignore - """Extended Compute Plugin Schema with extended IO defs.""" - - inputs: list[PluginInput] - outputs: list[PluginOutput] - version: Version diff --git a/src/polus/tabular/_plugins/models/pydanticv1/wipp.py b/src/polus/tabular/_plugins/models/pydanticv1/wipp.py deleted file mode 100644 index 402cba5..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv1/wipp.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Extending automatically generated wipp model. - -This file modifies and extend certain fields and -functions of WIPPPluginSchema.py which is automatically -generated by datamodel-codegen from JSON schema. 
-""" -from typing import Literal -from typing import Optional -from typing import Union - -from polus.tabular._plugins.io import Input -from polus.tabular._plugins.io import Output -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.models.pydanticv1.WIPPPluginSchema import ( - ResourceRequirements, -) -from polus.tabular._plugins.models.pydanticv1.WIPPPluginSchema import WippPluginManifest -from pydantic import BaseModel -from pydantic import Field - - -class UI1(BaseModel): - """Base class for UI items.""" - - key: str = Field(constr=r"^inputs.[a-zA-Z0-9][-a-zA-Z0-9]*$") - title: str - description: Optional[str] - condition: Optional[str] - default: Optional[Union[str, float, int, bool]] - hidden: Optional[bool] = Field(default=False) - bind: Optional[str] - - -class FieldSet(BaseModel): - """Base class for FieldSet.""" - - title: str - fields: list[str] = Field(min_items=1, unique_items=True) - - -class UI2(BaseModel): - """UI items class for fieldsets.""" - - key: Literal["fieldsets"] - fieldsets: list[FieldSet] = Field(min_items=1, unique_items=True) - - -class WIPPPluginManifest(WippPluginManifest): - """Extended WIPP Plugin Schema.""" - - inputs: list[Input] = Field( - ..., - description="Defines inputs to the plugin", - title="List of Inputs", - ) - outputs: list[Output] = Field( - ..., - description="Defines the outputs of the plugin", - title="List of Outputs", - ) - ui: list[Union[UI1, UI2]] = Field(..., title="Plugin form UI definition") - version: Version - resourceRequirements: Optional[ResourceRequirements] = Field( # noqa - None, - examples=[ - { - "ramMin": 2048, - "coresMin": 1, - "cpuAVX": True, - "cpuAVX2": False, - "gpu": True, - "cudaRequirements": { - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0", - }, - }, - ], - title="Plugin Resource Requirements", - ) diff --git a/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py b/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py 
deleted file mode 100644 index d87a986..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv2/PolusComputeSchema.py +++ /dev/null @@ -1,136 +0,0 @@ -# generated by datamodel-codegen: edited by Camilo Velez -# timestamp: 2022-09-21T03:41:58+00:00 - -from __future__ import annotations - -from enum import Enum -from typing import Annotated - -from pydantic import BaseModel -from pydantic import Field -from pydantic import StringConstraints - - -class PluginInputType(Enum): - path = "path" - string = "string" - number = "number" - array = "array" - boolean = "boolean" - - -class PluginInput(BaseModel): - format: str | None = Field(None, title="Format") - label: str | None = Field(None, title="Label") - name: str = Field(..., title="Name") - required: bool = Field(..., title="Required") - type: PluginInputType - default: str | float | bool | None = Field(None, title="Default") - - -class PluginOutputType(Enum): - path = "path" - - -class PluginOutput(BaseModel): - format: str | None = Field(None, title="Format") - label: str | None = Field(None, title="Label") - name: str = Field(..., title="Name") - type: PluginOutputType - - -class GpuVendor(Enum): - none = "none" - amd = "amd" - tpu = "tpu" - nvidia = "nvidia" - - -class PluginHardwareRequirements(BaseModel): - coresMax: str | float | None = Field(None, title="Coresmax") - coresMin: str | float | None = Field(None, title="Coresmin") - cpuAVX: bool | None = Field(None, title="Cpuavx") - cpuAVX2: bool | None = Field(None, title="Cpuavx2") - cpuMin: str | None = Field(None, title="Cpumin") - gpu: GpuVendor | None = None - gpuCount: float | None = Field(None, title="Gpucount") - gpuDriverVersion: str | None = Field(None, title="Gpudriverversion") - gpuType: str | None = Field(None, title="Gputype") - outDirMax: str | float | None = Field(None, title="Outdirmax") - outDirMin: str | float | None = Field(None, title="Outdirmin") - ramMax: str | float | None = Field(None, title="Rammax") - ramMin: str | float | None = 
Field(None, title="Rammin") - tmpDirMax: str | float | None = Field(None, title="Tmpdirmax") - tmpDirMin: str | float | None = Field(None, title="Tmpdirmin") - - -class ThenEntry(BaseModel): - action: str = Field(..., title="Action") - input: str = Field(..., title="Input") - value: str = Field(..., title="Value") - - -class ConditionEntry(BaseModel): - expression: str = Field(..., title="Expression") - - -class Validator(BaseModel): - then: list[ThenEntry] | None = Field(None, title="Then") - validator: list[ConditionEntry] | None = Field(None, title="Validator") - - -class PluginUIType(Enum): - checkbox = "checkbox" - color = "color" - date = "date" - email = "email" - number = "number" - password = "password" - radio = "radio" - range = "range" - text = "text" - time = "time" - - -class PluginUIInput(BaseModel): - bind: str | None = Field(None, title="Bind") - condition: list[Validator] | str | None = Field(None, title="Condition") - default: str | float | bool | None = Field(None, title="Default") - description: str | None = Field(None, title="Description") - fieldset: list[str] | None = Field(None, title="Fieldset") - hidden: bool | None = Field(None, title="Hidden") - key: str = Field(..., title="Key") - title: str = Field(..., title="Title") - type: PluginUIType - - -class PluginUIOutput(BaseModel): - description: str = Field(..., title="Description") - format: str | None = Field(None, title="Format") - name: str = Field(..., title="Name") - type: PluginUIType - website: str | None = Field(None, title="Website") - - -class PluginSchema(BaseModel): - author: str | None = Field(None, title="Author") - baseCommand: list[str] | None = Field(None, title="Basecommand") - citation: str | None = Field(None, title="Citation") - containerId: str = Field(..., title="Containerid") - customInputs: bool | None = Field(None, title="Custominputs") - description: str = Field(..., title="Description") - inputs: list[PluginInput] = Field(..., title="Inputs") - institution: str 
| None = Field(None, title="Institution") - name: str = Field(..., title="Name") - outputs: list[PluginOutput] = Field(..., title="Outputs") - pluginHardwareRequirements: PluginHardwareRequirements - repository: str | None = Field(None, title="Repository") - title: str = Field(..., title="Title") - ui: list[PluginUIInput | PluginUIOutput] = Field(..., title="Ui") - version: Annotated[ - str, - StringConstraints( - pattern=r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$", - ), - ] = Field(..., examples=["0.1.0", "0.1.0rc1"], title="Version") - website: str | None = Field(None, title="Website") diff --git a/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py b/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py deleted file mode 100644 index 099cb32..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv2/WIPPPluginSchema.py +++ /dev/null @@ -1,241 +0,0 @@ -# generated by datamodel-codegen: edited by Camilo Velez -# timestamp: 2023-01-04T14:54:38+00:00 - -from __future__ import annotations - -from enum import Enum -from typing import Annotated -from typing import Any - -from pydantic import AnyUrl -from pydantic import BaseModel -from pydantic import Field -from pydantic import StringConstraints - - -class Type(Enum): - collection = "collection" - stitchingVector = "stitchingVector" - tensorflowModel = "tensorflowModel" - csvCollection = "csvCollection" - pyramid = "pyramid" - pyramidAnnotation = "pyramidAnnotation" - notebook = "notebook" - genericData = "genericData" - string = "string" - number = "number" - integer = "integer" - enum = "enum" - array = "array" - boolean = "boolean" - - -class Input(BaseModel): - name: Annotated[ - str, - StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), - ] = Field( - ..., - description="Input name as expected by the plugin CLI", - examples=["inputImages", 
"fileNamePattern", "thresholdValue"], - title="Input name", - ) - type: Type = Field( - ..., - examples=["collection", "string", "number"], - title="Input Type", - ) - description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( - ..., - examples=["Input Images"], - title="Input description", - ) - required: bool | None = Field( - True, - description="Whether an input is required or not", - examples=[True], - title="Required input", - ) - - -class Type1(Enum): - collection = "collection" - stitchingVector = "stitchingVector" - tensorflowModel = "tensorflowModel" - tensorboardLogs = "tensorboardLogs" - csvCollection = "csvCollection" - pyramid = "pyramid" - pyramidAnnotation = "pyramidAnnotation" - genericData = "genericData" - - -class Output(BaseModel): - name: Annotated[ - str, - StringConstraints(pattern=r"^[a-zA-Z0-9][-a-zA-Z0-9]*$"), - ] = Field(..., examples=["outputCollection"], title="Output name") - type: Type1 = Field( - ..., - examples=["stitchingVector", "collection"], - title="Output type", - ) - description: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( - ..., - examples=["Output collection"], - title="Output description", - ) - - -class UiItem(BaseModel): - key: Any | Any = Field( - ..., - description="Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. 
Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", - examples=["inputs.inputImages", "inputs.fileNamPattern", "fieldsets"], - title="UI key", - ) - - -class CudaRequirements(BaseModel): - deviceMemoryMin: float | None = Field( - 0, - examples=[100], - title="Minimum device memory", - ) - cudaComputeCapability: str | list[Any] | None = Field( - None, - description="Specify either a single minimum value, or an array of valid values", - examples=["8.0", ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"]], - title="The cudaComputeCapability Schema", - ) - - -class ResourceRequirements(BaseModel): - ramMin: float | None = Field( - None, - examples=[2048], - title="Minimum RAM in mebibytes (Mi)", - ) - coresMin: float | None = Field( - None, - examples=[1], - title="Minimum number of CPU cores", - ) - cpuAVX: bool | None = Field( - False, - examples=[True], - title="Advanced Vector Extensions (AVX) CPU capability required", - ) - cpuAVX2: bool | None = Field( - False, - examples=[False], - title="Advanced Vector Extensions 2 (AVX2) CPU capability required", - ) - gpu: bool | None = Field( - False, - examples=[True], - title="GPU/accelerator required", - ) - cudaRequirements: CudaRequirements | None = Field( - {}, - examples=[{"deviceMemoryMin": 100, "cudaComputeCapability": "8.0"}], - title="GPU Cuda-related requirements", - ) - - -class WippPluginManifest(BaseModel): - name: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( - ..., - description="Name of the plugin (format: org/name)", - examples=["wipp/plugin-example"], - title="Plugin name", - ) - version: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( - ..., - description="Version of the plugin (semantic versioning preferred)", - examples=["1.0.0"], - title="Plugin version", - ) - title: Annotated[str, StringConstraints(pattern=r"^(.*)$", min_length=1)] = Field( - ..., - description="Plugin title to display in WIPP forms", - 
examples=["WIPP Plugin example"], - title="Plugin title", - ) - description: Annotated[ - str, - StringConstraints(pattern=r"^(.*)$", min_length=1), - ] = Field( - ..., - examples=["Custom image segmentation plugin"], - title="Short description of the plugin", - ) - author: Annotated[str, StringConstraints(pattern="^(.*)$")] | None | None = Field( - "", - examples=["FirstName LastName"], - title="Author(s)", - ) - institution: Annotated[ - str, - StringConstraints(pattern="^(.*)$"), - ] | None | None = Field( - "", - examples=["National Institute of Standards and Technology"], - title="Institution", - ) - repository: AnyUrl | None | None = Field( - "", - examples=["https://github.com/usnistgov/WIPP"], - title="Source code repository", - ) - website: AnyUrl | None | None = Field( - "", - examples=["http://usnistgov.github.io/WIPP"], - title="Website", - ) - citation: Annotated[str, StringConstraints(pattern="^(.*)$")] | None | None = Field( - "", - examples=[ - "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. 
Springer-Verlag International", - ], - title="Citation", - ) - containerId: Annotated[str, StringConstraints(pattern=r"^(.*)$")] = Field( - ..., - description="Docker image ID", - examples=["docker.io/wipp/plugin-example:1.0.0"], - title="ContainerId", - ) - baseCommand: list[str] | None = Field( - None, - description="Base command to use while running container image", - examples=[["python3", "/opt/executable/main.py"]], - title="Base command", - ) - inputs: set[Input] = Field( - ..., - description="Defines inputs to the plugin", - title="List of Inputs", - ) - outputs: list[Output] = Field( - ..., - description="Defines the outputs of the plugin", - title="List of Outputs", - ) - ui: list[UiItem] = Field(..., title="Plugin form UI definition") - resourceRequirements: ResourceRequirements | None = Field( - {}, - examples=[ - { - "ramMin": 2048, - "coresMin": 1, - "cpuAVX": True, - "cpuAVX2": False, - "gpu": True, - "cudaRequirements": { - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0", - }, - }, - ], - title="Plugin Resource Requirements", - ) diff --git a/src/polus/tabular/_plugins/models/pydanticv2/__init__.py b/src/polus/tabular/_plugins/models/pydanticv2/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/polus/tabular/_plugins/models/pydanticv2/compute.py b/src/polus/tabular/_plugins/models/pydanticv2/compute.py deleted file mode 100644 index a7dc5b2..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv2/compute.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Extending automatically generated compute model. - -This file modifies and extend certain fields and -functions of PolusComputeSchema.py which is automatically -generated by datamodel-codegen from JSON schema. 
-""" - -from polus.tabular._plugins.io import IOBase -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginInput -from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginOutput -from polus.tabular._plugins.models.pydanticv2.PolusComputeSchema import PluginSchema - - -class PluginInput(PluginInput, IOBase): # type: ignore - """Base Class for Input Args.""" - - -class PluginOutput(PluginOutput, IOBase): # type: ignore - """Base Class for Output Args.""" - - -class PluginSchema(PluginSchema): # type: ignore - """Extended Compute Plugin Schema with extended IO defs.""" - - inputs: list[PluginInput] - outputs: list[PluginOutput] - version: Version diff --git a/src/polus/tabular/_plugins/models/pydanticv2/wipp.py b/src/polus/tabular/_plugins/models/pydanticv2/wipp.py deleted file mode 100644 index caf757e..0000000 --- a/src/polus/tabular/_plugins/models/pydanticv2/wipp.py +++ /dev/null @@ -1,79 +0,0 @@ -"""Extending automatically generated wipp model. - -This file modifies and extend certain fields and -functions of WIPPPluginSchema.py which is automatically -generated by datamodel-codegen from JSON schema. 
-""" -from typing import Literal -from typing import Optional -from typing import Union - -from polus.tabular._plugins.io import Input -from polus.tabular._plugins.io import Output -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.models.pydanticv2.WIPPPluginSchema import ( - ResourceRequirements, -) -from polus.tabular._plugins.models.pydanticv2.WIPPPluginSchema import WippPluginManifest -from pydantic import BaseModel -from pydantic import Field - - -class UI1(BaseModel): - """Base class for UI items.""" - - key: str = Field(constr=r"^inputs.[a-zA-Z0-9][-a-zA-Z0-9]*$") - title: str - description: Optional[str] = None - condition: Optional[str] = None - default: Optional[Union[str, float, int, bool]] = None - hidden: Optional[bool] = Field(default=False) - bind: Optional[str] = None - - -class FieldSet(BaseModel): - """Base class for FieldSet.""" - - title: str - fields: set[str] = Field(min_length=1) - - -class UI2(BaseModel): - """UI items class for fieldsets.""" - - key: Literal["fieldsets"] - fieldsets: set[FieldSet] = Field(min_length=1) - - -class WIPPPluginManifest(WippPluginManifest): - """Extended WIPP Plugin Schema.""" - - inputs: list[Input] = Field( - ..., - description="Defines inputs to the plugin", - title="List of Inputs", - ) - outputs: list[Output] = Field( - ..., - description="Defines the outputs of the plugin", - title="List of Outputs", - ) - ui: list[Union[UI1, UI2]] = Field(..., title="Plugin form UI definition") - version: Version - resourceRequirements: Optional[ResourceRequirements] = Field( # noqa - None, - examples=[ - { - "ramMin": 2048, - "coresMin": 1, - "cpuAVX": True, - "cpuAVX2": False, - "gpu": True, - "cudaRequirements": { - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0", - }, - }, - ], - title="Plugin Resource Requirements", - ) diff --git a/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json b/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json deleted file 
mode 100644 index 8a407ae..0000000 --- a/src/polus/tabular/_plugins/models/wipp-plugin-manifest-schema.json +++ /dev/null @@ -1,726 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "$id": "https://raw.githubusercontent.com/usnistgov/WIPP-Plugins-base-templates/master/plugin-manifest/schema/wipp-plugin-manifest-schema.json", - "type": "object", - "title": "WIPP Plugin manifest", - "default": null, - "required": [ - "name", - "version", - "title", - "description", - "containerId", - "inputs", - "outputs", - "ui" - ], - "properties": { - "name": { - "$id": "#/properties/name", - "type": "string", - "title": "Plugin name", - "description": "Name of the plugin (format: org/name)", - "default": "", - "examples": [ - "wipp/plugin-example" - ], - "minLength": 1, - "pattern": "^(.*)$" - }, - "version": { - "$id": "#/properties/version", - "type": "string", - "title": "Plugin version", - "description": "Version of the plugin (semantic versioning preferred)", - "default": "", - "examples": [ - "1.0.0" - ], - "minLength": 1, - "pattern": "^(.*)$" - }, - "title": { - "$id": "#/properties/title", - "type": "string", - "title": "Plugin title", - "description": "Plugin title to display in WIPP forms", - "default": "", - "examples": [ - "WIPP Plugin example" - ], - "minLength": 1, - "pattern": "^(.*)$" - }, - "description": { - "$id": "#/properties/description", - "type": "string", - "title": "Description", - "title": "Short description of the plugin", - "default": "", - "examples": [ - "Custom image segmentation plugin" - ], - "minLength": 1, - "pattern": "^(.*)$" - }, - "author": { - "$id": "#/properties/author", - "type": ["string", "null"], - "title": "Author(s)", - "default": "", - "examples": [ - "FirstName LastName" - ], - "pattern": "^(.*)$" - }, - "institution": { - "$id": "#/properties/institution", - "type": ["string", "null"], - "title": "Institution", - "default": "", - "examples": [ - "National Institute of Standards and Technology" - ], - 
"pattern": "^(.*)$" - }, - "repository": { - "$id": "#/properties/repository", - "type": ["string", "null"], - "title": "Source code repository", - "default": "", - "examples": [ - "https://github.com/usnistgov/WIPP" - ], - "format": "uri" - }, - "website": { - "$id": "#/properties/website", - "type": ["string", "null"], - "title": "Website", - "default": "", - "examples": [ - "http://usnistgov.github.io/WIPP" - ], - "format": "uri" - }, - "citation": { - "$id": "#/properties/citation", - "type": ["string", "null"], - "title": "Citation", - "default": "", - "examples": [ - "Peter Bajcsy, Joe Chalfoun, and Mylene Simon (2018). Web Microanalysis of Big Image Data. Springer-Verlag International" - ], - "pattern": "^(.*)$" - }, - "containerId": { - "$id": "#/properties/containerId", - "type": "string", - "title": "ContainerId", - "description": "Docker image ID", - "default": "", - "examples": [ - "docker.io/wipp/plugin-example:1.0.0" - ], - "pattern": "^(.*)$" - }, - "baseCommand": { - "$id": "#/properties/baseCommand", - "type": "array", - "title": "Base command", - "description": "Base command to use while running container image", - "default": null, - "items": { - "type": "string" - }, - "examples": [ - ["python3", "/opt/executable/main.py"] - ] - }, - "inputs": { - "$id": "#/properties/inputs", - "type": "array", - "title": "List of Inputs", - "description": "Defines inputs to the plugin", - "default": null, - "uniqueItems": true, - "items": { - "$id": "#/properties/inputs/items", - "type": "object", - "title": "Input", - "description": "Plugin input", - "default": null, - "required": [ - "name", - "type", - "description" - ], - "properties": { - "name": { - "$id": "#/properties/inputs/items/properties/name", - "type": "string", - "title": "Input name", - "description": "Input name as expected by the plugin CLI", - "default": "", - "examples": [ - "inputImages", - "fileNamePattern", - "thresholdValue" - ], - "pattern": "^[a-zA-Z0-9][-a-zA-Z0-9]*$" - }, - "type": { 
- "$id": "#/properties/inputs/items/properties/type", - "type": "string", - "enum": [ - "collection", - "stitchingVector", - "tensorflowModel", - "csvCollection", - "pyramid", - "pyramidAnnotation", - "notebook", - "genericData", - "string", - "number", - "integer", - "enum", - "array", - "boolean" - ], - "title": "Input Type", - "examples": [ - "collection", - "string", - "number" - ] - }, - "description": { - "$id": "#/properties/inputs/items/properties/description", - "type": "string", - "title": "Input description", - "examples": [ - "Input Images" - ], - "pattern": "^(.*)$" - }, - "required": { - "$id": "#/properties/inputs/items/properties/required", - "type": "boolean", - "title": "Required input", - "description": "Whether an input is required or not", - "default": true, - "examples": [ - true - ] - } - }, - "allOf": [ - { - "if": { - "properties": { "type": { "const": "enum" } } - }, - "then": { - "properties": - { - "options": - { - "$id": "#/properties/inputs/items/properties/options", - "type": "object", - "title": "Input options", - "properties": - { - "values": - { - "type": "array", - "description": "List of possible values", - "items": - { - "type": "string" - }, - "uniqueItems": true - } - } - } - } - } - }, - { - "if": { - "properties": { "type": { "const": "array" } } - }, - "then": { - "properties": - { - "options": - { - "$id": "#/properties/inputs/items/properties/options", - "type": "object", - "title": "Input options", - "properties": - { - "items": { - "$id": "#/properties/inputs/items/properties/options/properties/items", - "type": "object", - "title": "List of array items", - "description": "Possible values for the input array", - "default": {}, - "required": [ - "type", - "title", - "oneOf", - "default", - "widget", - "minItems", - "uniqueItems" - ], - "properties": { - "type": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/type", - "type": "string", - "title": "Items type", - "description": "Type of 
the items to be selected", - "enum": ["string"], - "examples": [ - "string" - ] - }, - "title": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/title", - "type": "string", - "title": "Selection title", - "description": "Title of the item selection section in the form", - "default": "", - "examples": [ - "Select feature" - ] - }, - "oneOf": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf", - "type": "array", - "title": "Possible items", - "description": "List of possible items", - "default": [], - "items": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items", - "type": "object", - "title": "Items definition", - "description": "Description of the possible items", - "default": {}, - "required": [ - "description", - "enum" - ], - "properties": { - "description": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/description", - "type": "string", - "title": "Description", - "description": "Description of the value that will appear in the form", - "default": "", - "examples": [ - "Area" - ] - }, - "enum": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/enum", - "type": "array", - "title": "Value", - "description": "Values of the selected item", - "default": [], - "items": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/oneOf/items/properties/enum/items", - "type": "string", - "title": "List of values", - "description": "List of values associated with the selected item (usually one value)", - "default": "", - "examples": [ - "Feature2DJava_Area" - ] - } - } - }, - "examples": [ - { - "description": "Area", - "enum": [ - "Feature2DJava_Area" - ] - }, - { - "enum": [ - "Feature2DJava_Mean" - ], - "description": "Mean" - } - ] - } - }, - "default": { - "$id": 
"#/properties/inputs/items/properties/options/properties/items/properties/default", - "type": "string", - "title": "Default value", - "description": "Value selected by default (must be one of the possible values)", - "default": "", - "examples": [ - "Feature2DJava_Area" - ] - }, - "widget": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/widget", - "type": "string", - "title": "Item selection widget", - "description": "How items can be selected (select -> dropdown list with add/remove buttons, checkbox -> multi-selection from list)", - "enum": ["select", "checkbox"], - "examples": [ - "select" - ] - }, - "minItems": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/minItems", - "type": "integer", - "title": "Minumum number of items", - "description": "Minumum number of items", - "default": 0, - "examples": [ - 1 - ] - }, - "uniqueItems": { - "$id": "#/properties/inputs/items/properties/options/properties/items/properties/uniqueItems", - "type": ["string", "boolean"], - "title": "Uniqueness of the items", - "description": "Whether items in the array have to be unique", - "examples": [ - "true", true - ] - } - }, - "examples": [ - { - "type": "string", - "widget": "select", - "uniqueItems": "true", - "default": "Feature2DJava_Area", - "minItems": 1, - "title": "Select feature", - "oneOf": [ - { - "description": "Area", - "enum": [ - "Feature2DJava_Area" - ] - }, - { - "description": "Mean", - "enum": [ - "Feature2DJava_Mean" - ] - } - ] - } - ] - } - } - } - } - } - } - ] - } - }, - "outputs": { - "$id": "#/properties/outputs", - "type": "array", - "title": "List of Outputs", - "description": "Defines the outputs of the plugin", - "default": null, - "items": { - "$id": "#/properties/outputs/items", - "type": "object", - "title": "Plugin output", - "default": null, - "required": [ - "name", - "type", - "description" - ], - "properties": { - "name": { - "$id": 
"#/properties/outputs/items/properties/name", - "type": "string", - "title": "Output name", - "default": "", - "examples": [ - "outputCollection" - ], - "pattern": "^[a-zA-Z0-9][-a-zA-Z0-9]*$" - }, - "type": { - "$id": "#/properties/outputs/items/properties/type", - "type": "string", - "enum": [ - "collection", - "stitchingVector", - "tensorflowModel", - "tensorboardLogs", - "csvCollection", - "pyramid", - "pyramidAnnotation", - "genericData" - ], - "title": "Output type", - "examples": [ - "stitchingVector", - "collection" - ] - }, - "description": { - "$id": "#/properties/outputs/items/properties/description", - "type": "string", - "title": "Output description", - "examples": [ - "Output collection" - ], - "pattern": "^(.*)$" - } - } - } - }, - "ui": { - "$id": "#/properties/ui", - "type": "array", - "title": "Plugin form UI definition", - "items": - { - "type": "object", - "title": "List of UI definitions", - "required": [ - "key" - ], - "properties": { - "key": { - "$id": "#/properties/ui/items/properties/key", - "type": "string", - "title": "UI key", - "description": "Key of the input which this UI definition applies to, the expected format is 'inputs.inputName'. Special keyword 'fieldsets' can be used to define arrangement of inputs by sections.", - "examples": [ - "inputs.inputImages", "inputs.fileNamPattern", "fieldsets" - ], - "oneOf": [ - {"pattern": "^inputs\\.[a-zA-Z0-9][-a-zA-Z0-9]*$"}, - {"const": "fieldsets"} - ] - } - }, - "allOf": [ - { - "if": { - "properties": { "key": { "pattern": "^inputs\\.[a-zA-Z0-9][-a-zA-Z0-9]*$" } } - }, - "then": { - "properties": - { - "title": { - "$id": "#/properties/ui/items/properties/title", - "type": "string", - "title": "Input label", - "default": "", - "examples": [ - "Input images: " - ], - "pattern": "^(.*)$" - }, - "description": { - "$id": "#/properties/ui/items/properties/description", - "type": "string", - "title": "Input placeholder", - "default": "", - "examples": [ - "Pick a collection..." 
- ], - "pattern": "^(.*)$" - }, - "condition": { - "$id": "#/properties/ui/items/properties/condition", - "type": "string", - "title": "Input visibility condition", - "description": "Definition of when this field is visible or not, depending on the value of another input, the expected format for the condition is 'model.inputs.inputName==value'", - "default": "", - "examples": [ - "model.inputs.thresholdtype=='Manual'" - ], - "pattern": "^(.*)$" - }, - "default": { - "$id": "#/properties/ui/items/properties/default", - "type": ["string", "number", "integer", "boolean"], - "title": "Input default value", - "default": "", - "examples": [ - 5, false, ".ome.tif" - ] - }, - "hidden": { - "$id": "#/properties/ui/items/properties/hidden", - "type": "boolean", - "title": "Hidden input", - "description": "Hidden input will not be displayed on the form, but can be used in conjunction with the 'default' or 'bind' properties to define default or automatically set parameters", - "default": false, - "examples": [ - true, false - ] - }, - "bind": { - "$id": "#/properties/ui/items/properties/bind", - "type": "string", - "title": "Bind input value to another input", - "examples": [ - "gridWidth" - ] - } - }, - "required": [ - "title" - ] - } - }, - { - "if": { - "properties": { "key": { "const": "fieldsets" } } - }, - "then": { - "properties": - { - "fieldsets": - { - "description": "A list of definitions representing sections of input fields.", - "type": "array", - "items": { - "description": "A section of input fields.", - "type": "object", - "properties": { - "title": { - "type": "string", - "description": "The label of the section.", - "examples": [ - "Input images selection" - ] - }, - "fields": { - "description": "A list of input names representing input fields that belong to this section.", - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true, - "minItems": 1, - "examples": [ - "inputImages, fileNamePattern" - ] - } - }, - "uniqueItems": true, - 
"default": [], - "minItems": 1, - "required": [ - "title", "fields" - ] - } - } - }, - "required": [ - "fieldsets" - ] - } - } - ] - } - }, - "resourceRequirements": { - "type": "object", - "default": {}, - "title": "Plugin Resource Requirements", - "properties": { - "ramMin": { - "type": "number", - "title": "Minimum RAM in mebibytes (Mi)", - "examples": [ - 2048 - ] - }, - "coresMin": { - "type": "number", - "title": "Minimum number of CPU cores", - "examples": [ - 1 - ] - }, - "cpuAVX": { - "type": "boolean", - "default": false, - "title": "Advanced Vector Extensions (AVX) CPU capability required", - "examples": [ - true - ] - }, - "cpuAVX2": { - "type": "boolean", - "default": false, - "title": "Advanced Vector Extensions 2 (AVX2) CPU capability required", - "examples": [ - false - ] - }, - "gpu": { - "type": "boolean", - "default": false, - "title": "GPU/accelerator required", - "examples": [ - true - ] - }, - "cudaRequirements": { - "type": "object", - "default": {}, - "title": "GPU Cuda-related requirements", - "properties": { - "deviceMemoryMin": { - "type": "number", - "default": 0, - "title": "Minimum device memory", - "examples": [ - 100 - ] - }, - "cudaComputeCapability": { - "type": ["string", "array"], - "default": null, - "title": "The cudaComputeCapability Schema", - "description": "Specify either a single minimum value, or an array of valid values", - "examples": [ - "8.0", - ["3.5", "5.0", "6.0", "7.0", "7.5", "8.0"] - ] - } - }, - "examples": [{ - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0" - }] - } - }, - "examples": [{ - "ramMin": 2048, - "coresMin": 1, - "cpuAVX": true, - "cpuAVX2": false, - "gpu": true, - "cudaRequirements": { - "deviceMemoryMin": 100, - "cudaComputeCapability": "8.0" - } - }] - } - } - } diff --git a/src/polus/tabular/_plugins/registry.py b/src/polus/tabular/_plugins/registry.py deleted file mode 100644 index dfb0daa..0000000 --- a/src/polus/tabular/_plugins/registry.py +++ /dev/null @@ -1,280 +0,0 @@ 
-"""Methods to interact with REST API of WIPP Plugin Registry.""" -import json -import logging -import typing -from urllib.error import HTTPError -from urllib.parse import urljoin - -import requests -import xmltodict -from tqdm import tqdm - -from polus.tabular._plugins.classes import ComputePlugin, Plugin, refresh, submit_plugin -from polus.tabular._plugins.registry_utils import _generate_query, _to_xml - -logger = logging.getLogger("polus.tabular") - - -class FailedToPublish(Exception): - """Raised when there is an error publishing a resource.""" - - -class MissingUserInfo(Exception): - """Raised when necessary user info is not provided for authentication.""" - - -class WippPluginRegistry: - """Class that contains methods to interact with the REST API of WIPP Registry.""" - - def __init__( - self, - username: typing.Optional[str] = None, - password: typing.Optional[str] = None, - registry_url: str = "https://wipp-registry.ci.ncats.io", - verify: bool = True, # verify SSL? - ): - """Initialize WippPluginRegistry from username, password, registry url.""" - self.registry_url = registry_url - self.username = username - self.password = password - self.verify = verify - - @classmethod - def _parse_xml(cls, xml: str): - """Return dictionary of Plugin Manifest. 
If error, return None.""" - d = xmltodict.parse(xml)["Resource"]["role"]["PluginManifest"][ - "PluginManifestContent" - ]["#text"] - try: - return json.loads(d) - except BaseException: - e = eval(d) - if isinstance(e, dict): - return e - else: - return None - - def update_plugins(self): - """Update plugins from WIPP Registry.""" - url = self.registry_url + "/rest/data/query/" - headers = {"Content-type": "application/json"} - data = '{"query": {"$or":[{"Resource.role.type":"Plugin"},{"Resource.role.type.#text":"Plugin"}]}}' - if self.username and self.password: - r = requests.post( - url, - headers=headers, - data=data, - auth=(self.username, self.password), - verify=self.verify, - ) # authenticated request - else: - r = requests.post(url, headers=headers, data=data, verify=self.verify) - valid, invalid = 0, {} - - for r in tqdm(r.json()["results"], desc="Updating Plugins from WIPP"): - try: - manifest = WippPluginRegistry._parse_xml(r["xml_content"]) - submit_plugin(manifest) - valid += 1 - except BaseException as err: - invalid.update({r["title"]: err.args[0]}) - - finally: - if len(invalid) > 0: - self.invalid = invalid - logger.debug( - "Submitted %s plugins successfully. See WippPluginRegistry.invalid to check errors in unsubmitted plugins" - % (valid) - ) - logger.debug("Submitted %s plugins successfully." % (valid)) - refresh() - - def query( - self, - title: typing.Optional[str] = None, - version: typing.Optional[str] = None, - title_contains: typing.Optional[str] = None, - contains: typing.Optional[str] = None, - query_all: bool = False, - advanced: bool = False, - query: typing.Optional[str] = None, - ): - """Query Plugins in WIPP Registry. - - This function executes queries for Plugins in the WIPP Registry. - - Args: - title: - title of the plugin to query. - Example: "OME Tiled Tiff Converter" - version: - version of the plugins to query. - Must follow semantic versioning. 
Example: "1.1.0" - title_contains: - keyword that must be part of the title of plugins to query. - Example: "Converter" will return all plugins with the word "Converter" in their title - contains: - keyword that must be part of the description of plugins to query. - Example: "bioformats" will return all plugins with the word "bioformats" in their description - query_all: if True it will override any other parameter and will return all plugins - advanced: - if True it will override any other parameter. - `query` must be included - query: query to execute. This query must be in MongoDB format - - - Returns: - An array of the manifests of the Plugins returned by the query. - """ - url = self.registry_url + "/rest/data/query/" - headers = {"Content-type": "application/json"} - query = _generate_query( - title, version, title_contains, contains, query_all, advanced, query - ) - - data = '{"query": %s}' % str(query).replace("'", '"') - - if self.username and self.password: - r = requests.post( - url, - headers=headers, - data=data, - auth=(self.username, self.password), - verify=self.verify, - ) # authenticated request - else: - r = requests.post(url, headers=headers, data=data, verify=self.verify) - logger.debug(f"r is {r.raise_for_status}") - return [ - WippPluginRegistry._parse_xml(x["xml_content"]) for x in r.json()["results"] - ] - - def get_current_schema(self): - """Return current schema in WIPP.""" - r = requests.get( - urljoin( - self.registry_url, - "rest/template-version-manager/global/?title=res-md.xsd", - ), - verify=self.verify, - ) - if r.ok: - return r.json()[0]["current"] - else: - r.raise_for_status() - - def upload( - self, - plugin: typing.Union[Plugin, ComputePlugin], - author: typing.Optional[str] = None, - email: typing.Optional[str] = None, - publish: bool = True, - ): - """Upload Plugin to WIPP Registry. - - This function uploads a Plugin object to the WIPP Registry. 
- Author name and email to be passed to the Plugin object - information on the WIPP Registry are taken from the value - of the field `author` in the `Plugin` manifest. That is, - the first email and the first name (first and last) will - be passed. The value of these two fields can be overridden - by specifying them in the arguments. - - Args: - plugin: - Plugin to be uploaded - author: - Optional `str` to override author name - email: - Optional `str` to override email - publish: - If `False`, Plugin will not be published to the public - workspace. It will be visible only to the user uploading - it. Default is `True` - - Returns: - A message indicating a successful upload. - """ - manifest = plugin.manifest - - xml_content = _to_xml(manifest, author, email) - - schema_id = self.get_current_schema() - - data = { - "title": manifest["name"], - "template": schema_id, - "xml_content": xml_content, - } - - url = self.registry_url + "/rest/data/" - headers = {"Content-type": "application/json"} - if self.username and self.password: - r = requests.post( - url, - headers=headers, - data=json.dumps(data), - auth=(self.username, self.password), - verify=self.verify, - ) # authenticated request - else: - raise MissingUserInfo("The registry connection must be authenticated.") - - response_code = r.status_code - - if response_code != 201: - print( - "Error uploading file (%s), code %s" - % (data["title"], str(response_code)) - ) - r.raise_for_status() - if publish: - _id = r.json()["id"] - _purl = url + _id + "/publish/" - r2 = requests.patch( - _purl, - headers=headers, - auth=(self.username, self.password), - verify=self.verify, - ) - try: - r2.raise_for_status() - except HTTPError as err: - raise FailedToPublish( - "Failed to publish {} with id {}".format(data["title"], _id) - ) from err - - return "Successfully uploaded %s" % data["title"] - - def get_resource_by_pid(self, pid): - """Return current resource.""" - response = requests.get(pid, verify=self.verify) - return 
response.json() - - def patch_resource( - self, - pid, - version, - ): - """Patch resource in registry.""" - if self.username is None or self.password is None: - raise MissingUserInfo("The registry connection must be authenticated.") - - # Get current version of the resource - data = self.get_resource_by_pid(pid) - - data.update({"version": version}) - response = requests.patch( - urljoin(self.registry_url, "rest/data/" + data["id"]), - data, - auth=(self.username, self.password), - verify=self.verify, - ) - response_code = response.status_code - - if response_code != 200: - print( - "Error publishing data (%s), code %s" - % (data["title"], str(response_code)) - ) - response.raise_for_status() diff --git a/src/polus/tabular/_plugins/registry_utils.py b/src/polus/tabular/_plugins/registry_utils.py deleted file mode 100644 index 8c2bd51..0000000 --- a/src/polus/tabular/_plugins/registry_utils.py +++ /dev/null @@ -1,135 +0,0 @@ -"""Utilities for WIPP Registry Module.""" -import re -import typing - - -def _generate_query( - title, version, title_contains, contains, query_all, advanced, query -): - if advanced: - if not query: - raise ValueError("query cannot be empty if advanced is True") - else: - return query - if query_all: - q = { - "$or": [ - {"Resource.role.type": "Plugin"}, - {"Resource.role.type.#text": "Plugin"}, - ] - } # replace query - return q - - # Check for possible errors: - if title and title_contains: - raise ValueError("Cannot define title and title_contains together") - q = {} # query to return - q["$and"] = [] - q["$and"].append( - { - "$or": [ - {"Resource.role.type": "Plugin"}, - {"Resource.role.type.#text": "Plugin"}, - ] - } - ) - if title: - q["$and"].append( - { - "$or": [ - {"Resource.identity.title.#text": title}, - {"Resource.identity.title": title}, - ] - } - ) - if version: - q["$and"].append( - { - "$or": [ - {"Resource.identity.version.#text": version}, - {"Resource.identity.version": version}, - ] - } - ) - if contains: - 
q["$and"].append( - { - "$or": [ - { - "Resource.content.description.#text": { - "$regex": f".*{contains}.*", - "$options": "i", - } - }, - { - "Resource.content.description": { - "$regex": f".*{contains}.*", - "$options": "i", - } - }, - ] - } - ) - if title_contains: - q["$and"].append( - { - "$or": [ - { - "Resource.identity.title.#text": { - "$regex": f".*{title_contains}.*", - "$options": "i", - } - }, - { - "Resource.identity.title": { - "$regex": f".*{title_contains}.*", - "$options": "i", - } - }, - ] - } - ) - return q - - -def _get_email(author: str): - regex = re.compile(r"[A-Za-z][A-Za-z0-9.]*@[A-Za-z0-9.]*") - return regex.search(author).group() - - -def _get_author(author: str): - return " ".join(author.split()[0:2]) - - -def _to_xml( - manifest: dict, - author: typing.Optional[str] = None, - email: typing.Optional[str] = None, -): - if email is None: - email = _get_email(manifest["author"]) - if author is None: - author = _get_author(manifest["author"]) - - xml = ( - '' - f'{manifest["name"]}' - f'{str(manifest["version"])}' - '' - f'{manifest["institution"]}' - '' - f'{author}' - f'{email}' - '' - f'{manifest["description"]}' - '' - 'Plugin' - f'{manifest["containerId"]}' - '' - f'{str(manifest)}' - ) - - return xml diff --git a/src/polus/tabular/_plugins/update/__init__.py b/src/polus/tabular/_plugins/update/__init__.py deleted file mode 100644 index a6fef0d..0000000 --- a/src/polus/tabular/_plugins/update/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Initialize update module.""" - -from polus.tabular._plugins.update._update import update_nist_plugins -from polus.tabular._plugins.update._update import update_polus_plugins - -__all__ = ["update_polus_plugins", "update_nist_plugins"] diff --git a/src/polus/tabular/_plugins/update/_update.py b/src/polus/tabular/_plugins/update/_update.py deleted file mode 100644 index 4998ee5..0000000 --- a/src/polus/tabular/_plugins/update/_update.py +++ /dev/null @@ -1,116 +0,0 @@ -# pylint: disable=W1203, W1201 
-import json -import logging -import re -import typing - -from polus.tabular._plugins._compat import PYDANTIC_V2 -from polus.tabular._plugins.classes import refresh -from polus.tabular._plugins.classes import submit_plugin -from polus.tabular._plugins.gh import _init_github -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.manifests import _error_log -from polus.tabular._plugins.manifests import _scrape_manifests -from pydantic import ValidationError -from tqdm import tqdm - -logger = logging.getLogger("polus.tabular") - - -def update_polus_plugins( - gh_auth: typing.Optional[str] = None, - min_depth: int = 2, - max_depth: int = 3, -) -> None: - """Scrape PolusAI GitHub repo and create local versions of Plugins.""" - logger.info("Updating polus plugins.") - # Get all manifests - valid, invalid = _scrape_manifests( - "polusai/polus-plugins", - _init_github(gh_auth), - min_depth, - max_depth, - True, - ) - manifests = valid.copy() - manifests.extend(invalid) - logger.info(f"Submitting {len(manifests)} plugins.") - - for manifest in manifests: - try: - plugin = submit_plugin(manifest) - - # Parsing checks specific to polus-plugins - error_list = [] - - # Check that plugin version matches container version tag - container_name, version = tuple(plugin.containerId.split(":")) - version = Version(version) if PYDANTIC_V2 else Version(version=version) - organization, container_name = tuple(container_name.split("/")) - if plugin.version != version: - msg = ( - f"containerId version ({version}) does not " - f"match plugin version ({plugin.version})" - ) - logger.error(msg) - error_list.append(ValueError(msg)) - - # Check to see that the plugin is registered to Labshare - if organization not in ["polusai", "labshare"]: - msg = ( - "all polus plugin containers must be" - " under the Labshare organization." 
- ) - logger.error(msg) - error_list.append(ValueError(msg)) - - # Checks for container name, they are somewhat related to our - # Jenkins build - if not container_name.startswith("polus"): - msg = "containerId name must begin with polus-" - logger.error(msg) - error_list.append(ValueError(msg)) - - if not container_name.endswith("plugin"): - msg = "containerId name must end with -plugin" - logger.error(msg) - error_list.append(ValueError(msg)) - - if len(error_list) > 0: - raise ValidationError(error_list, plugin.__class__) - - except ValidationError as val_err: - try: - _error_log(val_err, manifest, "update_polus_plugins") - except BaseException as e: # pylint: disable=W0718 - logger.exception(f"In {plugin.name}: {e}") - except BaseException as e: # pylint: disable=W0718 - logger.exception(f"In {plugin.name}: {e}") - refresh() - - -def update_nist_plugins(gh_auth: typing.Optional[str] = None) -> None: - """Scrape NIST GitHub repo and create local versions of Plugins.""" - # Parse README links - gh = _init_github(gh_auth) - repo = gh.get_repo("usnistgov/WIPP") - contents = repo.get_contents("plugins") - readme = [r for r in contents if r.name == "README.md"][0] - pattern = re.compile( - r"\[manifest\]\((https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*))\)", - ) - matches = pattern.findall(str(readme.decoded_content)) - logger.info("Updating NIST plugins.") - for match in tqdm(matches, desc="NIST Manifests"): - url_parts = match[0].split("/")[3:] - plugin_repo = gh.get_repo("/".join(url_parts[:2])) - manifest = json.loads( - plugin_repo.get_contents("/".join(url_parts[4:])).decoded_content, - ) - - try: - submit_plugin(manifest) - - except ValidationError as val_err: - _error_log(val_err, manifest, "update_nist_plugins") - refresh() diff --git a/src/polus/tabular/_plugins/utils.py b/src/polus/tabular/_plugins/utils.py deleted file mode 100644 index 152c1a7..0000000 --- a/src/polus/tabular/_plugins/utils.py +++ 
/dev/null @@ -1,17 +0,0 @@ -"""General utilities for polus-plugins.""" -from polus.tabular._plugins.io import Version - - -def name_cleaner(name: str) -> str: - """Generate Plugin Class Name from Plugin name in manifest.""" - replace_chars = "()<>-_" - for char in replace_chars: - name = name.replace(char, " ") - return name.title().replace(" ", "").replace("/", "_") - - -def cast_version(value): - """Return Version object from version str or dict.""" - if isinstance(value, dict): # if init from a Version object - value = value["version"] - return Version(version=value) diff --git a/tests/__init__.py b/tests/__init__.py deleted file mode 100644 index 4ede8e6..0000000 --- a/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# noqa diff --git a/tests/resources/b1.json b/tests/resources/b1.json deleted file mode 100644 index a385c3b..0000000 --- a/tests/resources/b1.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection..." - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." 
- }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/b2.json b/tests/resources/b2.json deleted file mode 100644 index 3e28f5b..0000000 --- a/tests/resources/b2.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "citation": "Peng et al. \"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image 
collection..." - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." - }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/b3.json b/tests/resources/b3.json deleted file mode 100644 index f161974..0000000 --- a/tests/resources/b3.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection..." - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." 
- }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/g1.json b/tests/resources/g1.json deleted file mode 100644 index ca32f19..0000000 --- a/tests/resources/g1.json +++ /dev/null @@ -1,78 +0,0 @@ -{ - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection..." - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." 
- }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/g2.json b/tests/resources/g2.json deleted file mode 100644 index 24d32be..0000000 --- a/tests/resources/g2.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "version": "1.2.7", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "name": "BaSiC Flatfield Correction Plugin", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "repository": "https://github.com/polusai/polus-plugins", - "title": "Flatfield correction using BaSiC algorithm.", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection..." 
- }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." - }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/g3.json b/tests/resources/g3.json deleted file mode 100644 index e589644..0000000 --- a/tests/resources/g3.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "Peng et al. 
\"A BaSiC tool for background and shading correction of optical microscopy images\" Nature Communications (2017)", - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": true - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": true - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": true - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": false - } - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection..." - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets" - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values." 
- }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image" - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image" - } - ] -} diff --git a/tests/resources/omeconverter022.json b/tests/resources/omeconverter022.json deleted file mode 100644 index b696f46..0000000 --- a/tests/resources/omeconverter022.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "OME Converter", - "version": "0.2.2", - "title": "OME Converter", - "description": "Convert Bioformats supported format to OME Zarr.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/labshare/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "", - "containerId": "polusai/ome-converter-plugin:0.2.2", - "inputs": [ - { - "name": "inpDir", - "type": "genericData", - "description": "Input generic data collection to be processed by this plugin", - "required": true - }, - { - "name": "filePatter", - "type": "string", - "description": "A filepattern, used to select data to be converted", - "required": true - } - ], - "outputs": [ - { - "name": "outDir", - "type": "genericData", - "description": "Output collection" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input generic collection", - "description": "Input generic data collection to be processed by this plugin" - }, - { - "key": "inputs.filePattern", - "title": "Filepattern", - "description": "A filepattern, used to select data for conversion" - } - ] -} diff --git a/tests/resources/tabularconverter.json b/tests/resources/tabularconverter.json deleted file mode 100644 index ea9cfbb..0000000 --- a/tests/resources/tabularconverter.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "name": "Tabular 
Converter", - "version": "0.1.2-dev1", - "title": "Tabular Converter", - "description": "WIPP plugin allows tabular data conversion arrow file format and vice versa.", - "author": "Kelechi Nina Mezu (nina.mezu@nih.gov), Hamdah Shafqat Abbasi (hamdahshafqat.abbasi@nih.gov)", - "institution": "National Center for Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/PolusAI/tabular-tools", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": "", - "containerId": "polusai/tabular-converter-tool:0.1.2-dev1", - "baseCommand": [ - "python3", - "-m", - "polus.tabular.formats.tabular_converter" - ], - "inputs": [ - { - "name": "inpDir", - "type": "genericData", - "description": "Input data collection to be processed by this plugin", - "required": true - }, - { - "name": "filePattern", - "type": "string", - "description": "Pattern to parse input files", - "required": false - }, - { - "name": "fileExtension", - "type": "enum", - "description": "File format of an output file", - "required": true, - "options": { - "values": [ - ".csv", - ".fits", - ".fcs", - ".feather", - ".parquet", - ".hdf5", - ".arrow", - "default" - ] - } - } - ], - "outputs": [ - { - "name": "outDir", - "type": "genericData", - "description": "Output directory" - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input collection", - "description": "Input data collection to be processed by this plugin" - }, - { - "key": "inputs.filePattern", - "title": "FilePattern", - "description": "Pattern to parse input files", - "default": ".+" - }, - { - "key": "inputs.fileExtension", - "title": "FileExtension", - "description": "Desired file format of an ouput file", - "default": ".arrow" - } - ] -} \ No newline at end of file diff --git a/tests/resources/target1.cwl b/tests/resources/target1.cwl deleted file mode 100644 index 9185bee..0000000 --- a/tests/resources/target1.cwl +++ /dev/null @@ -1,32 +0,0 @@ -class: CommandLineTool 
-cwlVersion: v1.2 -inputs: - fileExtension: - inputBinding: - prefix: --fileExtension - type: string - filePattern: - inputBinding: - prefix: --filePattern - type: string? - inpDir: - inputBinding: - prefix: --inpDir - type: Directory - outDir: - inputBinding: - prefix: --outDir - type: Directory -outputs: - outDir: - outputBinding: - glob: $(inputs.outDir.basename) - type: Directory -requirements: - DockerRequirement: - dockerPull: polusai/tabular-converter-tool:0.1.2-dev1 - InitialWorkDirRequirement: - listing: - - entry: $(inputs.outDir) - writable: true - InlineJavascriptRequirement: {} diff --git a/tests/test_cwl.py b/tests/test_cwl.py deleted file mode 100644 index 56cb847..0000000 --- a/tests/test_cwl.py +++ /dev/null @@ -1,105 +0,0 @@ -# type: ignore -# pylint: disable=W0621, W0613 -"""Tests for CWL utils.""" -from pathlib import Path - -import pydantic -import pytest -import yaml - -import polus.tabular as pp -from polus.tabular._plugins.classes.plugin_base import MissingInputValuesError - -PYDANTIC_VERSION = pydantic.__version__.split(".")[0] -RSRC_PATH = Path(__file__).parent.joinpath("resources") - -TabularConverter = RSRC_PATH.joinpath("tabularconverter.json") - - -@pytest.fixture -def submit_plugin(): - """Submit TabularConverter plugin.""" - if "TabularConverter" not in pp.list: - pp.submit_plugin(TabularConverter) - else: - if "0.1.2-dev1" not in pp.TabularConverter.versions: - pp.submit_plugin(TabularConverter) - - -@pytest.fixture -def plug(submit_plugin): - """Get TabularConverter plugin.""" - return pp.get_plugin("TabularConverter", "0.1.2-dev1") - - -@pytest.fixture(scope="session") -def cwl_io_path(tmp_path_factory): - """Temp CWL IO path.""" - return tmp_path_factory.mktemp("io") / "tabularconverter_io.yml" - - -@pytest.fixture(scope="session") -def cwl_path(tmp_path_factory): - """Temp CWL IO path.""" - return tmp_path_factory.mktemp("cwl") / "tabularconverter.cwl" - - -@pytest.fixture -def cwl_io(plug, cwl_io_path): - """Test save_cwl 
IO.""" - rs_path = RSRC_PATH.absolute() - plug.inpDir = rs_path - plug.filePattern = ".*.csv" - plug.fileExtension = ".arrow" - plug.outDir = rs_path - plug.save_cwl_io(cwl_io_path) - - -def test_save_read_cwl(plug, cwl_path): - """Test save and read cwl.""" - plug.save_cwl(cwl_path) - with open(cwl_path, encoding="utf-8") as file: - src_cwl = file.read() - with open(RSRC_PATH.joinpath("target1.cwl"), encoding="utf-8") as file: - target_cwl = file.read() - assert src_cwl == target_cwl - - -def test_save_cwl_io_not_inp(plug, cwl_io_path): - """Test save_cwl IO.""" - with pytest.raises(MissingInputValuesError): - plug.save_cwl_io(cwl_io_path) - - -def test_save_cwl_io_not_inp2(plug, cwl_io_path): - """Test save_cwl IO.""" - plug.inpDir = RSRC_PATH.absolute() - plug.filePattern = "img_r{rrr}_c{ccc}.tif" - with pytest.raises(MissingInputValuesError): - plug.save_cwl_io(cwl_io_path) - - -def test_save_cwl_io_not_yml(plug, cwl_io_path): - """Test save_cwl IO.""" - plug.inpDir = RSRC_PATH.absolute() - plug.filePattern = ".*.csv" - plug.fileExtension = ".arrow" - plug.outDir = RSRC_PATH.absolute() - with pytest.raises(ValueError): - plug.save_cwl_io(cwl_io_path.with_suffix(".txt")) - - -def test_read_cwl_io(cwl_io, cwl_io_path): - """Test read_cwl_io.""" - with open(cwl_io_path, encoding="utf-8") as file: - src_io = yaml.safe_load(file) - assert src_io["inpDir"] == { - "class": "Directory", - "location": str(RSRC_PATH.absolute()), - } - assert src_io["outDir"] == { - "class": "Directory", - "location": str(RSRC_PATH.absolute()), - } - assert src_io["filePattern"] == ".*.csv" - assert src_io["fileExtension"] == ".arrow" diff --git a/tests/test_io.py b/tests/test_io.py deleted file mode 100644 index 5686e0e..0000000 --- a/tests/test_io.py +++ /dev/null @@ -1,69 +0,0 @@ -# pylint: disable=C0103 -"""IO Tests.""" -from pathlib import Path - -import pytest -from fsspec.implementations.local import LocalFileSystem - -from polus.tabular._plugins.classes import _load_plugin -from 
polus.tabular._plugins.classes.plugin_base import IOKeyError -from polus.tabular._plugins.io import Input, IOBase - -RSRC_PATH = Path(__file__).parent.joinpath("resources") - -io1 = { - "type": "collection", - "name": "input1", - "required": True, - "description": "Test IO", -} -io2 = {"type": "boolean", "name": "input2", "required": True, "description": "Test IO"} -iob1 = { - "type": "collection", -} -plugin = _load_plugin(RSRC_PATH.joinpath("g1.json")) - - -def test_iobase(): - """Test IOBase.""" - IOBase(**iob1) - - -@pytest.mark.parametrize("io", [io1, io2], ids=["io1", "io2"]) -def test_input(io): - """Test Input.""" - Input(**io) - - -def test_set_attr_invalid1(): - """Test setting invalid attribute.""" - with pytest.raises(TypeError): - plugin.inputs[0].examples = [2, 5] - - -def test_set_attr_invalid2(): - """Test setting invalid attribute.""" - with pytest.raises(IOKeyError): - plugin.invalid = False - - -def test_set_attr_valid1(): - """Test setting valid attribute.""" - i = [x for x in plugin.inputs if x.name == "darkfield"] - i[0].value = True - - -def test_set_attr_valid2(): - """Test setting valid attribute.""" - plugin.darkfield = True - - -def test_set_fsspec(): - """Test setting fs valid attribute.""" - plugin._fs = LocalFileSystem() # pylint: disable=protected-access - - -def test_set_fsspec2(): - """Test setting fs invalid attribute.""" - with pytest.raises(ValueError): - plugin._fs = "./" # pylint: disable=protected-access diff --git a/tests/test_manifests.py b/tests/test_manifests.py deleted file mode 100644 index 52f2ea9..0000000 --- a/tests/test_manifests.py +++ /dev/null @@ -1,236 +0,0 @@ -# pylint: disable=C0103 -"""Test manifests utils.""" -from collections import OrderedDict -from pathlib import Path - -import pytest - -from polus.tabular._plugins.classes import PLUGINS, list_plugins -from polus.tabular._plugins.manifests import ( - InvalidManifestError, - _load_manifest, - validate_manifest, -) -from polus.tabular._plugins.models import 
ComputeSchema, WIPPPluginManifest - -RSRC_PATH = Path(__file__).parent.joinpath("resources") - -d_val = { - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": 'Peng et al. "A BaSiC tool for background and shading correction of optical microscopy images" Nature Communications (2017)', - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": True, - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": True, - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": True, - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": True, - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": False, - }, - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin", - } - ], - "ui": [ - { - "key": "inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection...", - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets", - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - 
"description": "Group data together with varying variable values.", - }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image", - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image", - }, - ], -} - -test_dict_load = OrderedDict( - { - "dictionary": { - "name": "BaSiC Flatfield Correction Plugin", - "version": "1.2.7", - "title": "Flatfield correction using BaSiC algorithm.", - "description": "Generates images used for flatfield correction using the BaSiC algorithm.", - "author": "Nick Schaub (nick.schaub@nih.gov)", - "institution": "National Center for the Advancing Translational Sciences, National Institutes of Health", - "repository": "https://github.com/polusai/polus-plugins", - "website": "https://ncats.nih.gov/preclinical/core/informatics", - "citation": 'Peng et al. "A BaSiC tool for background and shading correction of optical microscopy images" Nature Communications (2017)', - "containerId": "polusai/basic-flatfield-correction-plugin:1.2.7", - "inputs": [ - { - "name": "inpDir", - "type": "collection", - "description": "Input image collection.", - "required": True, - }, - { - "name": "filePattern", - "type": "string", - "description": "Filename pattern used to separate images by channel, timepoint, and replicate.", - "required": True, - }, - { - "name": "darkfield", - "type": "boolean", - "description": "Calculate darkfield image.", - "required": True, - }, - { - "name": "photobleach", - "type": "boolean", - "description": "Calculate photobleaching offsets.", - "required": True, - }, - { - "name": "groupBy", - "type": "string", - "description": "Group images together for flatfield by variable.", - "required": False, - }, - ], - "outputs": [ - { - "name": "outDir", - "type": "collection", - "description": "Output data for the plugin", - } - ], - "ui": [ - { - "key": 
"inputs.inpDir", - "title": "Input image collection: ", - "description": "Image collection...", - }, - { - "key": "inputs.filePattern", - "title": "Filename pattern: ", - "description": "Use a filename pattern to calculate flatfield information by subsets", - }, - { - "key": "inputs.groupBy", - "title": "Grouping Variables: ", - "description": "Group data together with varying variable values.", - }, - { - "key": "inputs.darkfield", - "title": "Calculate darkfield: ", - "description": "If selected, will generate a darkfield image", - }, - { - "key": "inputs.photobleach", - "title": "Calclate photobleaching offset: ", - "description": "If selected, will generate an offset scalar for each image", - }, - ], - }, - "path": RSRC_PATH.joinpath("g1.json"), - } -) - -REPO_PATH = RSRC_PATH.parent.parent -LOCAL_MANIFESTS = list(REPO_PATH.rglob("*plugin.json")) -LOCAL_MANIFESTS = [ - x for x in LOCAL_MANIFESTS if "cookiecutter.project" not in str(x) -] # filter cookiecutter templates -LOCAL_MANIFEST_NAMES = [str(x) for x in LOCAL_MANIFESTS] - - -def _get_path(manifest): - """Return path of local plugin manifest.""" - return PLUGINS[manifest][max(PLUGINS[manifest])] - - -# @pytest.mark.repo -# @pytest.mark.parametrize("manifest", LOCAL_MANIFESTS, ids=LOCAL_MANIFEST_NAMES) -# def test_manifests_local(manifest): -# """Test local (repo) manifests.""" -# assert isinstance(validate_manifest(manifest), (WIPPPluginManifest, ComputeSchema)) - - -def test_list_plugins(): - """Test `list_plugins()`.""" - o = list(PLUGINS.keys()) - o.sort() - assert o == list_plugins() - - -@pytest.mark.parametrize("manifest", list_plugins(), ids=list_plugins()) -def test_manifests_plugindir(manifest): - """Test manifests available in polus-plugins installation dir.""" - p = _get_path(manifest) - assert isinstance(validate_manifest(p), (WIPPPluginManifest, ComputeSchema)) - - -@pytest.mark.parametrize("type_", test_dict_load.values(), ids=test_dict_load.keys()) -def test_load_manifest(type_): # test path 
and dict - """Test _load_manifest() for types path and dict.""" - assert _load_manifest(type_) == d_val - - -bad = [f"b{x}.json" for x in [1, 2, 3]] -good = [f"g{x}.json" for x in [1, 2, 3]] - - -@pytest.mark.parametrize("manifest", bad, ids=bad) -def test_bad_manifest(manifest): - """Test bad manifests raise InvalidManifest error.""" - with pytest.raises(InvalidManifestError): - validate_manifest(REPO_PATH.joinpath("tests", "resources", manifest)) - - -@pytest.mark.parametrize("manifest", good, ids=good) -def test_good_manifest(manifest): - """Test different manifests that all should pass validation.""" - p = RSRC_PATH.joinpath(manifest) - assert isinstance(validate_manifest(p), (WIPPPluginManifest, ComputeSchema)) diff --git a/tests/test_plugins.py b/tests/test_plugins.py deleted file mode 100644 index 0e02dd6..0000000 --- a/tests/test_plugins.py +++ /dev/null @@ -1,198 +0,0 @@ -# type: ignore -# pylint: disable=C0116, W0621, W0613 -"""Plugin Object Tests.""" -from pathlib import Path - -import pytest - -import polus.tabular as pp -from polus.tabular._plugins.classes import Plugin, _load_plugin - -RSRC_PATH = Path(__file__).parent.joinpath("resources") -OMECONVERTER = RSRC_PATH.joinpath("omeconverter022.json") -BASIC_131 = ( - "https://raw.githubusercontent.com/PolusAI/polus-plugins/" - "e8f23a3661e3e5f7ad7dc92f4b0d9c31e7076589/regression/" - "polus-basic-flatfield-correction-plugin/plugin.json" -) -BASIC_127 = ( - "https://raw.githubusercontent.com/PolusAI/polus-plugins/" - "440e64a51a578e21b574009424a75c848ebbbb03/regression/polus-basic" - "-flatfield-correction-plugin/plugin.json" -) - - -@pytest.fixture -def remove_all(): - """Remove all plugins.""" - pp.remove_all() - - -def test_empty_list(remove_all): - """Test empty list.""" - assert pp.list == [] - - -def test_submit_plugin(remove_all): - """Test submit_plugin.""" - pp.submit_plugin(OMECONVERTER) - assert pp.list == ["OmeConverter"] - - -@pytest.fixture -def submit_omeconverter(): - 
pp.submit_plugin(OMECONVERTER) - - -@pytest.fixture -def submit_basic131(): - pp.submit_plugin(BASIC_131) - - -@pytest.fixture -def submit_basic127(): - pp.submit_plugin(BASIC_127) - - -def test_get_plugin(submit_omeconverter): - """Test get_plugin.""" - assert isinstance(pp.get_plugin("OmeConverter"), Plugin) - - -def test_url1(submit_omeconverter, submit_basic131): - """Test url submit.""" - assert sorted(pp.list) == ["BasicFlatfieldCorrectionPlugin", "OmeConverter"] - - -def test_url2(submit_omeconverter, submit_basic131, submit_basic127): - """Test url submit.""" - assert sorted(pp.list) == ["BasicFlatfieldCorrectionPlugin", "OmeConverter"] - - -def test_load_plugin(submit_omeconverter): - """Test load_plugin.""" - assert _load_plugin(OMECONVERTER).name == "OME Converter" - - -def test_load_plugin2(submit_basic131): - """Test load_plugin.""" - assert _load_plugin(BASIC_131).name == "BaSiC Flatfield Correction Plugin" - - -def test_attr1(submit_omeconverter): - """Test attributes.""" - p_attr = pp.OmeConverter - p_get = pp.get_plugin("OmeConverter") - for attr in p_get.__dict__: - if attr == "id": - continue - assert getattr(p_attr, attr) == getattr(p_get, attr) - - -def test_attr2(submit_basic131): - """Test attributes.""" - p_attr = pp.BasicFlatfieldCorrectionPlugin - p_get = pp.get_plugin("BasicFlatfieldCorrectionPlugin") - for attr in p_get.__dict__: - if attr == "id": - continue - assert getattr(p_attr, attr) == getattr(p_get, attr) - - -def test_versions(submit_basic131, submit_basic127): - """Test versions.""" - assert sorted(pp.get_plugin("BasicFlatfieldCorrectionPlugin").versions) == [ - "1.2.7", - "1.3.1", - ] - - -def test_get_max_version1(submit_basic131, submit_basic127): - """Test get max version.""" - plug = pp.get_plugin("BasicFlatfieldCorrectionPlugin") - assert plug.version == "1.3.1" - - -def test_get_max_version2(submit_basic131, submit_basic127): - """Test get max version.""" - plug = pp.BasicFlatfieldCorrectionPlugin - assert plug.version 
== "1.3.1" - - -def test_get_specific_version(submit_basic131, submit_basic127): - """Test get specific version.""" - plug = pp.get_plugin("BasicFlatfieldCorrectionPlugin", "1.2.7") - assert plug.version == "1.2.7" - - -def test_remove_version(submit_basic131, submit_basic127): - """Test remove version.""" - pp.remove_plugin("BasicFlatfieldCorrectionPlugin", "1.2.7") - assert pp.BasicFlatfieldCorrectionPlugin.versions == ["1.3.1"] - - -def test_remove_all_versions_plugin( - submit_basic131, submit_basic127, submit_omeconverter -): - """Test remove all versions plugin.""" - pp.remove_plugin("BasicFlatfieldCorrectionPlugin") - assert pp.list == ["OmeConverter"] - - -def test_submit_str_1(): - """Test submit_plugin with string.""" - pp.remove_all() - pp.submit_plugin(str(OMECONVERTER)) - assert pp.list == ["OmeConverter"] - - -def test_submit_str_2(): - """Test submit_plugin with string.""" - pp.remove_all() - pp.submit_plugin(str(OMECONVERTER.absolute())) - assert pp.list == ["OmeConverter"] - - -@pytest.fixture -def plug1(): - """Configure the class.""" - pp.submit_plugin(BASIC_131) - plug1 = pp.BasicFlatfieldCorrectionPlugin - plug1.inpDir = RSRC_PATH.absolute() - plug1.outDir = RSRC_PATH.absolute() - plug1.filePattern = "*.ome.tif" - plug1.darkfield = True - plug1.photobleach = False - return plug1 - - -@pytest.fixture(scope="session") -def config_path(tmp_path_factory): - """Temp config path.""" - return tmp_path_factory.mktemp("config") / "config1.json" - - -def test_save_load_config(plug1, config_path): - """Test save_config, load_config from config file.""" - plug1.save_config(config_path) - plug2 = pp.load_config(config_path) - for i_o in ["inpDir", "outDir", "filePattern"]: - assert getattr(plug2, i_o) == getattr(plug1, i_o) - assert plug2.id == plug1.id - - -def test_load_config_no_plugin(plug1, config_path): - """Test load_config after removing plugin.""" - plug1.save_config(config_path) - plug1_id = plug1.id - 
pp.remove_plugin("BasicFlatfieldCorrectionPlugin") - assert pp.list == ["OmeConverter"] - plug2 = pp.load_config(config_path) - assert isinstance(plug2, Plugin) - assert plug2.id == plug1_id - - -def test_remove_all(submit_basic131, submit_basic127, submit_omeconverter): - """Test remove_all.""" - pp.remove_all() - assert pp.list == [] diff --git a/tests/test_version.py b/tests/test_version.py deleted file mode 100644 index 346b67b..0000000 --- a/tests/test_version.py +++ /dev/null @@ -1,171 +0,0 @@ -"""Test Version object and cast_version utility function.""" -import pydantic -import pytest -from pydantic import ValidationError - -from polus.tabular._plugins.io import Version -from polus.tabular._plugins.utils import cast_version - -PYDANTIC_VERSION = pydantic.__version__.split(".", maxsplit=1)[0] - -GOOD_VERSIONS = [ - "1.2.3", - "1.4.7-rc1", - "4.1.5", - "12.8.3", - "10.2.0", - "2.2.3-dev5", - "0.3.4", - "0.2.34-rc23", -] -BAD_VERSIONS = ["02.2.3", "002.2.3", "1.2", "1.0", "1.03.2", "23.3.03", "d.2.4"] - -PV = PYDANTIC_VERSION -print(PV) - - -@pytest.mark.parametrize("ver", GOOD_VERSIONS, ids=GOOD_VERSIONS) -def test_version(ver): - """Test Version pydantic model.""" - if PV == "1": - assert isinstance(Version(version=ver), Version) - assert isinstance(Version(ver), Version) - - -@pytest.mark.skipif(int(PV) > 1, reason="requires pydantic 1") -@pytest.mark.parametrize("ver", GOOD_VERSIONS, ids=GOOD_VERSIONS) -def test_cast_version(ver): - """Test cast_version utility function.""" - assert isinstance(cast_version(ver), Version) - - -@pytest.mark.parametrize("ver", BAD_VERSIONS, ids=BAD_VERSIONS) -def test_bad_version1(ver): - """Test ValidationError is raised for invalid versions.""" - if PV == "1": - with pytest.raises(ValidationError): - assert isinstance(cast_version(ver), Version) - with pytest.raises(ValidationError): - assert isinstance(Version(ver), Version) - - -MAJOR_VERSION_EQUAL = ["2.4.3", "2.98.28", "2.1.2", "2.0.0", "2.4.0"] -MINOR_VERSION_EQUAL = 
["1.3.3", "7.3.4", "98.3.12", "23.3.0", "1.3.5"] -PATCH_EQUAL = ["12.2.7", "2.3.7", "1.7.7", "7.7.7", "7.29.7"] - - -@pytest.mark.parametrize("ver", MAJOR_VERSION_EQUAL, ids=MAJOR_VERSION_EQUAL) -def test_major(ver): - """Test major version.""" - if PV == "2": - assert Version(ver).major == 2 - else: - assert cast_version(ver).major == 2 - - -@pytest.mark.parametrize("ver", MINOR_VERSION_EQUAL, ids=MINOR_VERSION_EQUAL) -def test_minor(ver): - """Test minor version.""" - if PV == "2": - assert Version(ver).minor == 3 - else: - assert cast_version(ver).minor == 3 - - -@pytest.mark.parametrize("ver", PATCH_EQUAL, ids=PATCH_EQUAL) -def test_patch(ver): - """Test patch version.""" - if PV == "2": - assert Version(ver).patch == 7 - else: - assert cast_version(ver).patch == 7 - - -def test_gt1(): - """Test greater than operator.""" - if PV == "2": - assert Version("1.2.3") > Version("1.2.1") - else: - assert cast_version("1.2.3") > cast_version("1.2.1") - - -def test_gt2(): - """Test greater than operator.""" - if PV == "2": - assert Version("5.7.3") > Version("5.6.3") - else: - assert cast_version("5.7.3") > cast_version("5.6.3") - - -def test_st1(): - """Test less than operator.""" - if PV == "2": - assert Version("5.7.3") < Version("5.7.31") - else: - assert cast_version("5.7.3") < cast_version("5.7.31") - - -def test_st2(): - """Test less than operator.""" - if PV == "2": - assert Version("1.0.2") < Version("2.0.2") - else: - assert cast_version("1.0.2") < cast_version("2.0.2") - - -def test_eq1(): - """Test equality operator.""" - if PV == "2": - assert Version("1.3.3") == Version("1.3.3") - else: - assert Version(version="1.3.3") == cast_version("1.3.3") - - -def test_eq2(): - """Test equality operator.""" - if PV == "2": - assert Version("5.4.3") == Version("5.4.3") - else: - assert Version(version="5.4.3") == cast_version("5.4.3") - - -def test_eq3(): - """Test equality operator.""" - if PV == "2": - assert Version("1.3.3") != Version("1.3.8") - else: - assert 
Version(version="1.3.3") != cast_version("1.3.8") - - -def test_eq_str1(): - """Test equality with str.""" - if PV == "2": - assert Version("1.3.3") == "1.3.3" - else: - assert Version(version="1.3.3") == "1.3.3" - - -def test_lt_str1(): - """Test equality with str.""" - if PV == "2": - assert Version("1.3.3") < "1.5.3" - else: - assert Version(version="1.3.3") < "1.5.3" - - -def test_gt_str1(): - """Test equality with str.""" - if PV == "2": - assert Version("4.5.10") > "4.5.9" - else: - assert Version(version="4.5.10") > "4.5.9" - - -def test_eq_no_str(): - """Test equality with non-string.""" - if PV == "2": - with pytest.raises(TypeError): - assert Version("1.3.3") == 1.3 - else: - with pytest.raises(TypeError): - assert Version(version="1.3.3") == 1.3 diff --git a/utils/polus-python-template/.bumpversion.cfg b/utils/polus-python-template/.bumpversion.cfg deleted file mode 100644 index 50aa146..0000000 --- a/utils/polus-python-template/.bumpversion.cfg +++ /dev/null @@ -1,23 +0,0 @@ -[bumpversion] -current_version = 1.1.0-dev1 -commit = False -tag = False -parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
-serialize = - {major}.{minor}.{patch}-{release}{dev} - {major}.{minor}.{patch} - -[bumpversion:part:release] -optional_value = _ -first_value = dev -values = - dev - _ - -[bumpversion:part:dev] - -[bumpversion:file:pyproject.toml] -search = version = "{current_version}" -replace = version = "{new_version}" - -[bumpversion:file:README.md] diff --git a/utils/polus-python-template/.gitignore b/utils/polus-python-template/.gitignore deleted file mode 100644 index d27abdc..0000000 --- a/utils/polus-python-template/.gitignore +++ /dev/null @@ -1 +0,0 @@ -poetry.lock \ No newline at end of file diff --git a/utils/polus-python-template/CHANGELOG.md b/utils/polus-python-template/CHANGELOG.md deleted file mode 100644 index 0d93e71..0000000 --- a/utils/polus-python-template/CHANGELOG.md +++ /dev/null @@ -1,14 +0,0 @@ -# CHANGELOG - -# 1.0.0 - -* Generate plugins from templates using cookiecutter. - -# 1.1.0 - -* Generate plugins following updated [standard guidelines](https://labshare.atlassian.net/wiki/spaces/WIPP/pages/3275980801/Python+Plugin+Standards) - -# 1.1.0-dev1 - -* Updated this package for tabular-tools repo - diff --git a/utils/polus-python-template/README.md b/utils/polus-python-template/README.md deleted file mode 100644 index 4c8d0e5..0000000 --- a/utils/polus-python-template/README.md +++ /dev/null @@ -1,112 +0,0 @@ -# WIPP Plugin Cookie Cutter (for Python) (v1.1.0-dev1) - -This repository is a cookie cutter template that creates the basic scaffold structure of a -polus plugin and add it to the polus plugins directory structure. - -## How to use -1. Clone `tabular-tools` and change to the tabular-tools directory -2. `cd /utils/polus-python-template/` -3. (optional) Install poetry if not available. -4. (optional) Create a dedicated environment with conda or venv. -5. Install the dependencies: `poetry install` -6. Ignore changes to `cookiecutter.json` using: `git update-index --assume-unchanged cookiecutter.json` -7. 
Modify `cookiecutter.json` to include author and plugin information.`plugin_package` should always start with `polus.tabular`. -** NOTE: ** Do not edit values in brackets ({}) as they are edited by cookiecutter directly. -Those are automatically generated from the previous entries. If your plugin is called -"Awesome Function", then the plugin folder and docker container will have the name `awesome-function-plugin`. -8. Create your plugin skeleton: `python -m cookiecutter . --no-input` - - -## Plugin Standard -The generated plugin will be compatible with polus most up-to-date guidelines : -see [standard guidelines](https://labshare.atlassian.net/wiki/spaces/WIPP/pages/3275980801/Python+Plugin+Standards) - -The code generated provides out-of-box support for : - - customizing the plugin code. - - implementing tests. - - creating and running a container. - - managing versioning. - - updating documentation (README, CHANGELOG). - - maintaining a WIPP manifest (plugin.json). - - -## Executing the plugin - -The plugin should be run as a package. -To install the package : - -`pip install .` - -The skeleton code can be run this way : -From the plugin's top directory (with the default values): - -`python -m polus.tabular.package1.package2.awesome_function -i /tmp/inp -o /tmp/out` - -This should print some logs with the provided inputs and outputs and return. - -## Running tests -Plugin's developer should use `pytest`. -Some simple tests have been added to the template as examples. -Before submitting a PR to `tabular-tools`, other unit tests should be created and added to the `tests` -directory. - -To run tests : - -From the plugin's top directory, type `python -m pytest`. -Depending on how you have set up your environment, you may be able to run the pytest cli directly `pytest`. See pytest doc for how the project source directory is scanned to collect tests. -This should run a test successfully and return. 
- - -## Creating and running a container - -` ./build-docker.sh && ./run-plugin.sh` - -Build the docker image and run the container. - -### DockerFile -A docker image is build from a base image with common dependencies pre-installed. -The image entrypoint will run the plugin's package entrypoint. - -### build-docker.sh -Run this script to build the container. - -### run-plugin.sh -Run the container locally. - - -## Customize the plugin - -### Project code - -A set of common dependencies are added to `pyproject.toml`. -Update according to your needs. - -### Managing versioning - -Making sure that the file version is consistent across files in a plugin can be -challenging, so the Python template now uses -[bump2version](https://github.com/c4urself/bump2version) -to help manage versioning. This automatically changes the `VERSION` and -`plugin.json` files to the next version, preventing you from having to remember -to change the version everywhere. The `bumpversion.cfg` can be modified to -change the version in other files as well. - -To use this feature: -`bump2version --config-file bumpversion.cfg` - -### Documentation - -#### README.md - -A basic description of what the plugin does. This should define all the inputs -and outputs. - -#### CHANGELOG.md - -Documents updates made to the plugin. - - -### WIPP manifest (plugin.json). - -This file defines the input and output variables for WIPP, and defines the UI -components showed to the user. 
diff --git a/utils/polus-python-template/VERSION b/utils/polus-python-template/VERSION deleted file mode 100644 index 3018fdc..0000000 --- a/utils/polus-python-template/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.1.0-dev0 diff --git a/utils/polus-python-template/hooks/post_gen_project.py b/utils/polus-python-template/hooks/post_gen_project.py deleted file mode 100644 index 745c331..0000000 --- a/utils/polus-python-template/hooks/post_gen_project.py +++ /dev/null @@ -1,63 +0,0 @@ -import os -import shutil -from pathlib import Path -import logging -from os import environ - -logging.basicConfig( - format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", - datefmt="%d-%b-%y %H:%M:%S", -) -POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "DEBUG")) -logger = logging.getLogger("polus-python-template-post") -logger.setLevel(POLUS_LOG) - - -def create_repository_directories(source_dir): - """ Buid the correct directories inside polus-plugins. - The directory structure must conforms to the plugin's spec : - - dash-separated word in identifier. 
- - folder hierarchy matches package namespace minus "polus.tabular" - - plugin's folder name reflects the plugin package name but ends with "-plugin" - Ex: polus.tabular.package1.package2.awesome_function becomes - package1/package2/awesome-function-plugin - """ - - # try to find the project's root, otherwise we stay in the - # staging directory - final_dir = source_dir.parent - for folder in Path(final_dir).parent.parents: - if os.path.exists(folder / ".git"): - final_dir = folder - break - - # by default we create a plugin directory at the root - target_dir = final_dir - - # figure out if additional directories need to be created at the root - # make sure we replace underscores - new_dirs = "{{cookiecutter.plugin_package}}".replace("_", "-") - new_dirs = new_dirs.split(".") - # remove polus.tabular so we only keep intermediary directories - # Ex: polus.tabular.package1.package2.awesome_function creates - # package1/package2/ - new_dirs = new_dirs[2:-1] - if len(new_dirs) != 0: - package_dir = os.path.join(*new_dirs) - target_dir = final_dir / package_dir - - # create the plugin directory - os.makedirs(target_dir, exist_ok=True) - - return target_dir - - -def move_project_source_to_final_location(): - """Move staged files to the the final target repo.""" - source_dir = Path(os.getcwd()) - target_dir = create_repository_directories(source_dir) - logger.debug(f"moving sources from {source_dir} to {target_dir}") - shutil.move(source_dir, target_dir) - -# NOTE do not create folder structure with the repo at the moment. 
-# move_project_source_to_final_location() \ No newline at end of file diff --git a/utils/polus-python-template/hooks/pre_gen_project.py b/utils/polus-python-template/hooks/pre_gen_project.py deleted file mode 100644 index 894625a..0000000 --- a/utils/polus-python-template/hooks/pre_gen_project.py +++ /dev/null @@ -1,55 +0,0 @@ -""" -Validate of template variables before templating the project -""" -import logging -from os import environ - -logging.basicConfig( - format="%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s", - datefmt="%d-%b-%y %H:%M:%S", -) -POLUS_LOG = getattr(logging, environ.get("POLUS_LOG", "DEBUG")) -logger = logging.getLogger("polus-python-template-pre") -logger.setLevel(POLUS_LOG) - -# NOTE Those validation could be performed on a plugin.json -# using polus plugins pydantic models. - -author = "{{ cookiecutter.author }}" -# TODO check valid - -author_email = "{{ cookiecutter.author_email }}" -## TODO check valid - -plugin_package = "{{ cookiecutter.plugin_package }}" -if not plugin_package.startswith("polus.tabular."): - raise ValueError( - f"plugin package must be a child of polus.tabular." - + f"plugin_package must start with 'polus.tabular'. Got : {plugin_package}" - ) -if plugin_package.endswith("_tool"): - raise ValueError( - f"plugin_package must not ends with _plugin. Got : {plugin_package}" - ) - -# TODO check we have a valid python package name - -plugin_version = "{{ cookiecutter.plugin_version }}" -# TODO check version is valid - -project_name = "{{ cookiecutter.project_name }}" -assert not ("_" in project_name) and not ("." 
in project_name) - -plugin_slug = "{{ cookiecutter.plugin_slug }}" -assert plugin_slug.startswith("polus-") and plugin_slug.endswith("-tool") - -container_name = "{{ cookiecutter.container_name }}" -assert container_name.endswith("-tool") - -container_id = "{{ cookiecutter.container_id }}" -assert container_id.startswith("polusai/") - -container_version = "{{ cookiecutter.container_version }}" -assert container_version == plugin_version - -logger.debug(f"plugin_package: {plugin_package}" ) diff --git a/utils/polus-python-template/pyproject.toml b/utils/polus-python-template/pyproject.toml deleted file mode 100644 index d6adbb2..0000000 --- a/utils/polus-python-template/pyproject.toml +++ /dev/null @@ -1,32 +0,0 @@ -[tool.poetry] -name = "polus-python-template" -version = "1.1.0-dev1" -description = "" -authors = ["Nick Schaub ", "Antoine Gerardin "] -readme = "README.md" -packages = [{include = "polus_python_template"}] - - -[tool.poetry.dependencies] -python = ">=3.9,<3.12" - -[tool.poetry.group.dev.dependencies] -cookiecutter = "1.7.2" -jinja2_ospath = "0.3.0" -bump2version = "^1.0.1" -pytest = "^7.4" -pytest-sugar = "^0.9.6" -pre-commit = "^3.2.1" -black = "^23.3.0" -mypy = "^1.1.1" -ruff = "^0.0.270" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - - -[tool.pytest.ini_options] -pythonpath = [ - "." -] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg b/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg deleted file mode 100644 index ae20e5c..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/.bumpversion.cfg +++ /dev/null @@ -1,29 +0,0 @@ -[bumpversion] -current_version = {{ cookiecutter.plugin_version }} -commit = False -tag = False -parse = (?P\d+)\.(?P\d+)\.(?P\d+)(\-(?P[a-z]+)(?P\d+))? 
-serialize = - {major}.{minor}.{patch}-{release}{dev} - {major}.{minor}.{patch} - -[bumpversion:part:release] -optional_value = _ -first_value = dev -values = - dev - _ - -[bumpversion:part:dev] - -[bumpversion:file:pyproject.toml] -search = version = "{current_version}" -replace = version = "{new_version}" - -[bumpversion:file:VERSION] - -[bumpversion:file:README.md] - -[bumpversion:file:plugin.json] - -[bumpversion:file:src/{{cookiecutter.package_folders}}/__init__.py] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore b/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore deleted file mode 100644 index 7c603f8..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/.dockerignore +++ /dev/null @@ -1,4 +0,0 @@ -.venv -out -tests -__pycache__ diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore b/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore deleted file mode 100644 index c04bc49..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/.gitignore +++ /dev/null @@ -1 +0,0 @@ -poetry.lock diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md b/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md deleted file mode 100644 index ca292da..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/CHANGELOG.md +++ /dev/null @@ -1,5 +0,0 @@ -# CHANGELOG - -## {{cookiecutter.container_version}} - -Initial release. 
diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile b/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile deleted file mode 100644 index b615060..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -FROM polusai/bfio:2.3.6 - -# environment variables defined in polusai/bfio -# ENV EXEC_DIR="/opt/executables" -# ENV DATA_DIR="/data" -# ENV POLUS_EXT=".ome.tif" -# Change to WARNING for fewer logs, and DEBUG for debugging -ENV POLUS_LOG="INFO" - -ENV POLUS_IMG_EXT=".ome.tif" -ENV POLUS_TAB_EXT=".csv" - -# Work directory defined in the base container -# WORKDIR ${EXEC_DIR} - -COPY pyproject.toml ${EXEC_DIR} -COPY VERSION ${EXEC_DIR} -COPY README.md ${EXEC_DIR} -COPY CHANGELOG.md ${EXEC_DIR} -COPY src ${EXEC_DIR}/src - -RUN pip3 install ${EXEC_DIR} --no-cache-dir - -# Default command. Additional arguments are provided through the command line -ENTRYPOINT ["python3", "-m", "{{cookiecutter.plugin_package}}"] -CMD ["--help"] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/README.md b/utils/polus-python-template/{{cookiecutter.container_name}}/README.md deleted file mode 100644 index f99b4a8..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# {{cookiecutter.plugin_name}} ({{cookiecutter.plugin_version}}) - -{{cookiecutter.plugin_description}} - -## Building - -To build the Docker image for the conversion plugin, run `./build-docker.sh`. - -## Install WIPP Plugin - -If WIPP is running, navigate to the plugins page and add a new plugin. Paste the -contents of `plugin.json` into the pop-up window and submit. 
- -## Options - -This plugin takes 2 input arguments and 1 output argument: - -| Name | Description | I/O | Type | Default -|---------------|-------------------------|--------|--------| -| inpDir | Input image collection to be processed by this plugin | Input | collection -| filePattern | Filename pattern used to separate data | Input | string | .* -| preview | Generate an output preview | Input | boolean | False -| outDir | Output collection | Output | collection diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION b/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION deleted file mode 100644 index 6c21993..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/VERSION +++ /dev/null @@ -1 +0,0 @@ -{{ cookiecutter.plugin_version }} diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh b/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh deleted file mode 100755 index cf00ccc..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/build-docker.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -version=$("] -readme = "README.md" -packages = [{include = "polus", from = "src"}] - -[tool.poetry.dependencies] -python = ">=3.9,<3.12" -bfio = {version = ">=2.3.3,<3.0", extras = ["all"]} -filepattern = ">=2.0.4,<3.0" -preadator = "^0.4.0.dev2" -typer = "^0.7.0" - -[tool.poetry.group.dev.dependencies] -bump2version = "^1.0.1" -pytest = "^7.4" -pytest-sugar = "^0.9.6" -pre-commit = "^3.2.1" -black = "^23.3.0" -mypy = "^1.1.1" -ruff = "^0.0.270" - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.pytest.ini_options] -pythonpath = [ - "." 
-] diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh b/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh deleted file mode 100755 index d979d07..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/run-plugin.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -version=$( None: - """Generate preview of the plugin outputs.""" - - preview = {} - - with Path.open(out_dir / "preview.json", "w") as fw: - json.dump(preview, fw, indent=2) - -@app.command() -def main( - inp_dir: Path = typer.Option( - ..., - "--inpDir", - "-i", - help="Input directory to be processed.", - exists=True, - readable=True, - file_okay=False, - resolve_path=True, - ), - filepattern: str = typer.Option( - ".*", - "--filePattern", - "-f", - help="Filepattern used to filter inputs.", - ), - out_dir: Path = typer.Option( - ..., - "--outDir", - "-o", - help="Output directory.", - exists=True, - writable=True, - file_okay=False, - resolve_path=True, - ), - preview: bool = typer.Option( - False, - "--preview", - "-v", - help="Preview of expected outputs (dry-run)", - show_default=False, - ), -): - """{{cookiecutter.plugin_name}}.""" - logger.info(f"inpDir: {inp_dir}") - logger.info(f"filePattern: {filepattern}") - logger.info(f"outDir: {out_dir}") - - if preview: - generate_preview(inp_dir, out_dir) - logger.info(f"generating preview data in : {out_dir}.") - return - - {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) - - -if __name__ == "__main__": - app() diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py b/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py deleted file mode 100644 index 2573a72..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/src/{{cookiecutter.package_folders}}/{{ cookiecutter.package_name }}.py 
+++ /dev/null @@ -1,16 +0,0 @@ -"""{{ cookiecutter.plugin_name }}.""" - -from pathlib import Path - - -def {{cookiecutter.package_name}}(inp_dir: Path, filepattern: str, out_dir: Path): - """{{cookiecutter.plugin_name}}. - - Args: - inp_dir: input directory to process - filepattern: filepattern to filter inputs - out_dir: output directory - Returns: - None - """ - pass \ No newline at end of file diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py deleted file mode 100644 index 28371ef..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"""Tests for {{cookiecutter.package_name}}.""" diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py deleted file mode 100644 index fd0c321..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/conftest.py +++ /dev/null @@ -1,147 +0,0 @@ -"""Test fixtures. - -Set up all data used in tests. 
-""" -import tempfile -import shutil -from pathlib import Path -import numpy as np -import pytest -import itertools - -from bfio import BioWriter, BioReader - -def pytest_addoption(parser: pytest.Parser) -> None: - """Add options to pytest.""" - parser.addoption( - "--downloads", - action="store_true", - dest="downloads", - default=False, - help="run tests that download large data files", - ) - parser.addoption( - "--slow", - action="store_true", - dest="slow", - default=False, - help="run slow tests", - ) - - - - -IMAGE_SIZES = [(1024 * (2**i) ,1024 * (2**i)) for i in range(1, 2)] -LARGE_IMAGE_SIZES = [(1024 * (2**i) ,1024 * (2**i)) for i in range(4, 5)] -PIXEL_TYPES = [np.uint8, float] -PARAMS = [ - (image_size, pixel_type) - for image_size, pixel_type in itertools.product( - IMAGE_SIZES, PIXEL_TYPES - ) -] -LARGE_DATASET_PARAMS = [ - (image_size, pixel_type) - for image_size, pixel_type in itertools.product( - LARGE_IMAGE_SIZES, PIXEL_TYPES - ) -] - - -FixtureReturnType = tuple[ - Path, # input dir - Path, # output dir - Path, # ground truth path - Path, # input image path - Path, # ground truth path -] - - -@pytest.fixture(params=PARAMS) -def generate_test_data(request: pytest.FixtureRequest) -> FixtureReturnType: - """Generate staging temporary directories with test data and ground truth.""" - - # collect test params - image_size, pixel_type = request.param - test_data = _generate_test_data(image_size, pixel_type) - print(test_data) - yield from test_data - - -@pytest.fixture(params=LARGE_DATASET_PARAMS) -def generate_large_test_data(request: pytest.FixtureRequest) -> FixtureReturnType: - """Generate staging temporary directories with test data and ground truth.""" - - # collect test params - image_size, pixel_type = request.param - test_data =_generate_test_data(image_size, pixel_type) - - print(test_data) - - yield from test_data - - -def _generate_test_data(image_size : tuple[int,int], pixel_type: int) -> FixtureReturnType: - """Generate staging temporary 
directories with test data and ground truth.""" - - image_x, image_y = image_size - - # staging area - data_dir = Path(tempfile.mkdtemp(suffix="_data_dir")) - inp_dir = data_dir.joinpath("inp_dir") - inp_dir.mkdir(exist_ok=True) - out_dir = data_dir.joinpath("out_dir") - out_dir.mkdir(exist_ok=True) - ground_truth_dir = data_dir.joinpath("ground_truth_dir") - ground_truth_dir.mkdir(exist_ok=True) - - # generate image and ground_truth - img_path = inp_dir.joinpath("img.ome.tif") - image = gen_2D_image(img_path, image_x, image_y, pixel_type) - ground_truth_path = ground_truth_dir.joinpath("ground_truth.ome.tif") - gen_ground_truth(img_path, ground_truth_path) - - yield inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path - - shutil.rmtree(data_dir) - -def gen_2D_image( - img_path, - image_x, - image_y, - pixel_type -) : - """Generate a random 2D square image.""" - - if np.issubdtype(pixel_type, np.floating) : - rng = np.random.default_rng() - image = rng.uniform(0.0, 1.0, - size=(image_y, image_x) - ).astype(pixel_type) - else: - image = np.random.randint(0, 255, size=(image_y, image_x)) - - with BioWriter(img_path) as writer: - (y, x) = image.shape - writer.Y = y - writer.X = x - writer.Z = 1 - writer.C = 1 - writer.T = 1 - writer.dtype = image.dtype - writer[:] = image[:] - - return image - - -def gen_ground_truth(img_path : Path, ground_truth_path : Path): - """generate some ground truth from the image data. - Here we generate a simple binary mask. 
- """ - - with BioReader(img_path) as reader: - with BioWriter(ground_truth_path, metadata=reader.metadata) as writer: - ground_truth = np.asarray(reader[:] != 0) - writer[:] = ground_truth - - return ground_truth \ No newline at end of file diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py deleted file mode 100644 index 1b51809..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_cli.py +++ /dev/null @@ -1,96 +0,0 @@ -"""Testing the Command Line Tool.""" - -import faulthandler -import json -from pathlib import Path -from typer.testing import CliRunner - -from .conftest import FixtureReturnType - -from {{cookiecutter.plugin_package}}.__main__ import app - -faulthandler.enable() - - -def test_cli(generate_test_data : FixtureReturnType) -> None: # noqa - """Test the command line.""" - inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data #noqa - - runner = CliRunner() - - result = runner.invoke( - app, - [ - "--inpDir", - inp_dir, - "--outDir", - out_dir, - ], - ) - - assert result.exit_code == 0 - -def test_cli_short(generate_test_data : FixtureReturnType): # noqa - """Test the command line.""" - runner = CliRunner() - - inp_dir, out_dir, _, _, _ = generate_test_data #noqa - - result = runner.invoke( - app, - [ - "-i", - inp_dir, - "-o", - out_dir, - ], - ) - - assert result.exit_code == 0 - -def test_cli_preview(generate_test_data : FixtureReturnType): # noqa - """Test the preview option.""" - runner = CliRunner() - - inp_dir, out_dir, _, _, _ = generate_test_data #noqa - - - result = runner.invoke( - app, - [ - "--inpDir", - inp_dir, - "--outDir", - out_dir, - "--preview", - ], - ) - - assert result.exit_code == 0 - - with Path.open(out_dir / "preview.json") as file: - plugin_json = json.load(file) - - # verify we generate the preview file - assert plugin_json == {} - - -def 
test_cli_bad_input(generate_test_data : FixtureReturnType): # noqa - """Test bad inputs.""" - runner = CliRunner() - - inp_dir, out_dir, _, _, _ = generate_test_data #noqa - # replace with a bad path - inp_dir = "/does_not_exists" - - result = runner.invoke( - app, - [ - "--inpDir", - inp_dir, - "--outDir", - out_dir, - ], - ) - - assert result.exc_info[0] is SystemExit diff --git a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py b/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py deleted file mode 100644 index 75e3552..0000000 --- a/utils/polus-python-template/{{cookiecutter.container_name}}/tests/test_{{cookiecutter.package_name}}.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Tests for {{cookiecutter.package_name}}.""" - -import pytest -from {{cookiecutter.plugin_package}}.{{cookiecutter.package_name}} import ( - {{cookiecutter.package_name}}, -) -from .conftest import FixtureReturnType - - -def test_{{cookiecutter.package_name}}(generate_test_data : FixtureReturnType): - """Test {{cookiecutter.package_name}}.""" - inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_test_data - filepattern = ".*" - assert {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) == None - - -@pytest.mark.skipif("not config.getoption('slow')") -def test_{{cookiecutter.package_name}}(generate_large_test_data : FixtureReturnType): - """Test {{cookiecutter.package_name}}.""" - inp_dir, out_dir, ground_truth_dir, img_path, ground_truth_path = generate_large_test_data - filepattern = ".*" - assert {{cookiecutter.package_name}}(inp_dir, filepattern, out_dir) == None \ No newline at end of file From 44e11df5f13df877118aad8faea9241fbd5bd8b0 Mon Sep 17 00:00:00 2001 From: hamshkhawar Date: Tue, 9 Apr 2024 11:39:49 -0500 Subject: [PATCH 4/6] fix toml file of k-means --- clustering/k-means-clustering-tool/plugin.json | 2 +- 
clustering/k-means-clustering-tool/pyproject.toml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/clustering/k-means-clustering-tool/plugin.json b/clustering/k-means-clustering-tool/plugin.json index 769500a..4ff38a6 100644 --- a/clustering/k-means-clustering-tool/plugin.json +++ b/clustering/k-means-clustering-tool/plugin.json @@ -111,4 +111,4 @@ "condition": "model.inputs.methods==Manual" } ] -} \ No newline at end of file +} diff --git a/clustering/k-means-clustering-tool/pyproject.toml b/clustering/k-means-clustering-tool/pyproject.toml index d82fc64..ff2effc 100644 --- a/clustering/k-means-clustering-tool/pyproject.toml +++ b/clustering/k-means-clustering-tool/pyproject.toml @@ -1,11 +1,11 @@ [tool.poetry] name = "polus-tabular-clustering-k-means" -version = "version = "0.3.5-dev1"" -description = "" +version = "0.3.5-dev1" +description = "K-means clustering" authors = [ "Jayapriya Nagarajan ", "Kelechi Nina Mezu ", -"hamshkhawar " +"Hamdah Shafqat Abbasi " ] readme = "README.md" packages = [{include = "polus", from = "src"}] From d78e40237a52ddc14e5706b57dc32b9e1de22531 Mon Sep 17 00:00:00 2001 From: hamshkhawar Date: Tue, 9 Apr 2024 11:44:49 -0500 Subject: [PATCH 5/6] fixed pre-commmit checks for tabular thresholding plugin --- transforms/tabular-thresholding-tool/build-docker.sh | 2 +- transforms/tabular-thresholding-tool/plugin.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/transforms/tabular-thresholding-tool/build-docker.sh b/transforms/tabular-thresholding-tool/build-docker.sh index 45824a8..056da7e 100644 --- a/transforms/tabular-thresholding-tool/build-docker.sh +++ b/transforms/tabular-thresholding-tool/build-docker.sh @@ -1,2 +1,2 @@ version=$( Date: Tue, 9 Apr 2024 12:19:37 -0500 Subject: [PATCH 6/6] fixed git action failures --- clustering/outlier-removal-tool/Dockerfile | 2 ++ clustering/outlier-removal-tool/pyproject.toml | 1 + .../tabular-thresholding-tool/tests/{__init_.py => __init__.py} | 0 
3 files changed, 3 insertions(+) rename transforms/tabular-thresholding-tool/tests/{__init_.py => __init__.py} (100%) diff --git a/clustering/outlier-removal-tool/Dockerfile b/clustering/outlier-removal-tool/Dockerfile index 3889076..9f88e9b 100644 --- a/clustering/outlier-removal-tool/Dockerfile +++ b/clustering/outlier-removal-tool/Dockerfile @@ -12,9 +12,11 @@ WORKDIR ${EXEC_DIR} COPY pyproject.toml ${EXEC_DIR} COPY VERSION ${EXEC_DIR} COPY README.md ${EXEC_DIR} +COPY src ${EXEC_DIR}/src RUN pip3 install ${EXEC_DIR} --no-cache + ENTRYPOINT ["python3", "-m", "polus.tabular.clustering.outlier_removal"] CMD ["--help"] diff --git a/clustering/outlier-removal-tool/pyproject.toml b/clustering/outlier-removal-tool/pyproject.toml index 73f65aa..cfd9548 100644 --- a/clustering/outlier-removal-tool/pyproject.toml +++ b/clustering/outlier-removal-tool/pyproject.toml @@ -19,6 +19,7 @@ vaex = "^4.17.0" scikit-learn = "^1.3.2" pyod = "^1.1.2" + [tool.poetry.group.dev.dependencies] pre-commit = "^3.3.3" bump2version = "^1.0.1" diff --git a/transforms/tabular-thresholding-tool/tests/__init_.py b/transforms/tabular-thresholding-tool/tests/__init__.py similarity index 100% rename from transforms/tabular-thresholding-tool/tests/__init_.py rename to transforms/tabular-thresholding-tool/tests/__init__.py