From e9c7f821fe1dfe5367fbe50a84d2d026294930fe Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 22:13:15 +0000 Subject: [PATCH 01/23] [pre-commit.ci] pre-commit autoupdate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.4.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.4.0...v5.0.0) - https://github.com/charliermarsh/ruff-pre-commit → https://github.com/astral-sh/ruff-pre-commit - [github.com/astral-sh/ruff-pre-commit: v0.0.275 → v0.8.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.0.275...v0.8.3) - [github.com/psf/black: 23.3.0 → 24.10.0](https://github.com/psf/black/compare/23.3.0...24.10.0) - [github.com/kynan/nbstripout: 0.6.1 → 0.8.1](https://github.com/kynan/nbstripout/compare/0.6.1...0.8.1) --- .pre-commit-config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e51025dde..6c12323fc 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,7 +3,7 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v5.0.0 hooks: - id: trailing-whitespace - id: check-json @@ -19,16 +19,16 @@ repos: - id: check-merge-conflict - id: end-of-file-fixer - id: sort-simple-yaml - - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.275 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.8.3 hooks: - id: ruff args: [--no-cache, --fix, --exit-non-zero-on-fix] - repo: https://github.com/psf/black - rev: 23.3.0 + rev: 24.10.0 hooks: - id: black - repo: https://github.com/kynan/nbstripout - rev: 0.6.1 + rev: 0.8.1 hooks: - id: nbstripout From 2d99b9e9f907ccbb15dc6a03fc6e74d67e2915f6 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 22:14:02 +0000 Subject: [PATCH 02/23] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- docs/conf.py | 1 + docs/drtsans/example_1d.py | 6 +++--- .../biosans_midrange_detector_barscan.ipynb | 3 +-- ...pixel_calibration_22487_with_midrange.ipynb | 9 +++------ .../biosans_wing_detector_barscan.ipynb | 3 +-- .../biosans_pixel_calibration_5767.ipynb | 9 +++------ ..._pixel_calibration_5767_with_midrange.ipynb | 10 +++------- .../biosans_pixel_calibration_838.ipynb | 8 +++----- ...iosans_pixel_calibration_838_midrange.ipynb | 9 +++------ .../gpsans/gpsans_pixel_calibration.ipynb | 4 ---- .../gpsans/gpsans_pixel_calibration_9905.ipynb | 3 --- .../gpsans/gpsans_reduction_1config.ipynb | 4 ++-- notebooks/tubewidth/gpsans_tubewidth.ipynb | 5 +---- scripts/common_utils.py | 1 + scripts/examples/plot_wavelength.py | 1 + ...ans_spice_prepare_sensitivities_template.py | 1 + scripts/prepare_sensitivities_biosans.py | 1 + scripts/prepare_sensitivities_gpsans.py | 1 + .../biosans_synthetic_sensitivity_dataset.py | 1 + src/drtsans/absolute_units.py | 1 + src/drtsans/frame_mode.py | 1 + src/drtsans/instruments.py | 2 +- src/drtsans/momentum_transfer.py | 18 ++++++++++-------- src/drtsans/mono/biosans/api.py | 1 + src/drtsans/mono/dark_current.py | 1 + src/drtsans/mono/gpsans/api.py | 1 + src/drtsans/mono/normalization.py | 1 + src/drtsans/mono/spice_xml_parser.py | 1 + src/drtsans/pixel_calibration.py | 14 ++++++-------- src/drtsans/sensitivity.py | 5 ----- .../sensitivity_correction_moving_detectors.py 
| 1 + src/drtsans/tof/eqsans/cfg.py | 1 + src/drtsans/tof/eqsans/normalization.py | 1 + tests/examples/BIOSANS.ipynb | 3 --- tests/examples/EQSANS_porasil.ipynb | 2 -- tests/examples/EQSANS_porasil.py | 1 + .../mono/biosans/test_create_event_nexus.py | 1 + .../test_overwrite_geometry_meta_data.py | 1 + .../mono/gpsans/test_create_event_nexus.py | 1 + tests/integration/drtsans/test_auto_wedge.py | 1 + .../drtsans/test_prepare_sensitivities.py | 1 + .../drtsans/tof/eqsans/test_generate_report.py | 2 +- .../drtsans/tof/eqsans/test_integration_api.py | 1 + .../drtsans/mono/test_momentum_transfer.py | 7 +------ .../mono/test_sample_detector_offsets.py | 1 + tests/unit/drtsans/test_detector.py | 1 - .../unit/drtsans/tof/eqsans/test_resolution.py | 6 ++---- tests/unit/test_all.py | 1 + 48 files changed, 70 insertions(+), 89 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 3678dcf94..5055cfd76 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -215,6 +215,7 @@ class ExecDirective(Directive): Credit goes to: https://stackoverflow.com/questions/27875455/displaying-dictionary-data-in-sphinx-documentation/29789910#29789910 """ + has_content = True def run(self): diff --git a/docs/drtsans/example_1d.py b/docs/drtsans/example_1d.py index 39de1973a..2b10d8651 100644 --- a/docs/drtsans/example_1d.py +++ b/docs/drtsans/example_1d.py @@ -8,9 +8,9 @@ # files config["mask"] = "/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/beamstop60_mask_4m.nxs" config["flux"] = "/SNS/EQSANS/shared/instrument_configuration/bl6_flux_at_sample" -config[ - "sensitivity_file_path" -] = "/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs" +config["sensitivity_file_path"] = ( + "/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs" +) config["dark_current"] = "/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/EQSANS_86275.nxs.h5" # numeric values diff --git a/notebooks/barscan/biosans_midrange_detector_barscan.ipynb b/notebooks/barscan/biosans_midrange_detector_barscan.ipynb index cbd70e1a8..cd8d16fec 100644 --- a/notebooks/barscan/biosans_midrange_detector_barscan.ipynb +++ b/notebooks/barscan/biosans_midrange_detector_barscan.ipynb @@ -15,11 +15,10 @@ "outputs": [], "source": [ "import os\n", - "import sys\n", "import time\n", "\n", "# Mantid imports\n", - "from mantid.simpleapi import LoadEventNexus, LoadNexusProcessed, SaveNexus, mtd\n", + "from mantid.simpleapi import LoadNexusProcessed, mtd\n", "\n", "# drtsans imports\n", "from drtsans.pixel_calibration import calculate_barscan_calibration, load_calibration, as_intensities\n", diff --git a/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb b/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb index 530f99899..3c5bec081 100644 --- a/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb +++ b/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb @@ -23,7 +23,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus, SaveNexus\n", + "from mantid.simpleapi import LoadNexus, SaveNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, as_intensities, plot_detector\n", "from drtsans.pixel_calibration import Table\n", "\n", @@ -675,8 +675,7 @@ "outputs": [], "source": [ "import os\n", - "from mantid.simpleapi import LoadEventNexus\n", - "from drtsans.mono.biosans import load_calibration, plot_detector\n", + "from drtsans.mono.biosans import 
load_calibration\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -791,7 +790,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -900,7 +899,6 @@ }, "outputs": [], "source": [ - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width(\n", @@ -996,7 +994,6 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", - "from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/barscan/biosans_wing_detector_barscan.ipynb b/notebooks/barscan/biosans_wing_detector_barscan.ipynb index 48235d1b6..15f789e72 100644 --- a/notebooks/barscan/biosans_wing_detector_barscan.ipynb +++ b/notebooks/barscan/biosans_wing_detector_barscan.ipynb @@ -15,11 +15,10 @@ "outputs": [], "source": [ "import os\n", - "import sys\n", "import time\n", "\n", "# Mantid imports\n", - "from mantid.simpleapi import LoadEventNexus, LoadNexusProcessed, SaveNexus\n", + "from mantid.simpleapi import LoadNexusProcessed\n", "# drtsans imports\n", "from drtsans.pixel_calibration import calculate_barscan_calibration, load_calibration, as_intensities" ] diff --git a/notebooks/biosans/biosans_pixel_calibration_5767.ipynb b/notebooks/biosans/biosans_pixel_calibration_5767.ipynb index 6cbb9f0b2..a2b775360 100644 --- a/notebooks/biosans/biosans_pixel_calibration_5767.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_5767.ipynb @@ -21,7 +21,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus, SaveNexus\n", + "from mantid.simpleapi import LoadNexus, SaveNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, as_intensities, plot_detector\n", "from drtsans.pixel_calibration import Table\n", "\n", @@ -562,8 +562,7 @@ "outputs": [], "source": [ "import os\n", - "from mantid.simpleapi import LoadEventNexus\n", - "from drtsans.mono.biosans import load_calibration, plot_detector\n", + "from drtsans.mono.biosans import load_calibration\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -666,7 +665,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -762,7 +761,6 @@ "metadata": {}, "outputs": [], "source": [ - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width('flood_workspace', component='detector1',\n", @@ -849,7 +847,6 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", - 
"from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb b/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb index 683d745eb..960d1077c 100644 --- a/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb @@ -626,8 +626,7 @@ "outputs": [], "source": [ "import os\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D\n", - "from drtsans.mono.biosans import load_calibration, plot_detector\n", + "from drtsans.mono.biosans import load_calibration\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -734,7 +733,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -767,8 +766,7 @@ "outputs": [], "source": [ "from drtsans.mono.biosans.simulated_intensities import clone_component_intensities, insert_midrange_detector\n", - "from mantid.api import AnalysisDataService\n", - "from mantid.simpleapi import mtd, DeleteWorkspace, DeleteWorkspaces, Integration, LoadEventAsWorkspace2D, SaveNexus\n", + "from mantid.simpleapi import mtd, DeleteWorkspaces, Integration, SaveNexus\n", "import os\n", "\n", "flood_file = '/HFIR/CG3/IPTS-24666/nexus/CG3_5904.nxs.h5'\n", @@ -835,7 +833,6 @@ "metadata": {}, "outputs": [], "source": [ - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width(\n", @@ -929,7 +926,6 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", - "from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_838.ipynb b/notebooks/biosans/biosans_pixel_calibration_838.ipynb index ab7a0a873..cb2d8a95d 100644 --- a/notebooks/biosans/biosans_pixel_calibration_838.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_838.ipynb @@ -20,7 +20,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus\n", + "from mantid.simpleapi import LoadNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, plot_detector\n", "\n", "#\n", @@ -204,8 +204,7 @@ "outputs": [], "source": [ "import os\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D\n", - "from drtsans.mono.biosans import load_calibration, plot_detector\n", + "from drtsans.mono.biosans import load_calibration\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -346,11 +345,10 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", + "from drtsans.mono.biosans import 
calculate_apparent_tube_width\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb b/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb index b4ae533b1..4a9882597 100644 --- a/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb @@ -20,7 +20,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus,LoadNexusProcessed, Integration, SaveNexus, mtd\n", + "from mantid.simpleapi import LoadNexus,LoadNexusProcessed, Integration, mtd\n", "from drtsans.mono.biosans import calculate_barscan_calibration, plot_detector\n", "from drtsans.mono.biosans.simulated_intensities import clone_component_intensities, insert_midrange_detector\n", "#\n", @@ -267,8 +267,7 @@ "outputs": [], "source": [ "import os\n", - "from mantid.simpleapi import LoadEventNexus\n", - "from drtsans.mono.biosans import load_calibration, plot_detector\n", + "from drtsans.mono.biosans import load_calibration\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -421,11 +420,9 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", diff --git a/notebooks/gpsans/gpsans_pixel_calibration.ipynb b/notebooks/gpsans/gpsans_pixel_calibration.ipynb index 5effaca81..a1073a3cd 100644 --- a/notebooks/gpsans/gpsans_pixel_calibration.ipynb +++ b/notebooks/gpsans/gpsans_pixel_calibration.ipynb @@ -355,7 +355,6 @@ "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", "%matplotlib inline\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -551,7 +550,6 @@ "\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "%matplotlib inline\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -668,7 +666,6 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -908,7 +905,6 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] diff --git a/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb b/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb index 
f2d51475e..c581a1449 100644 --- a/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb +++ b/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb @@ -558,7 +558,6 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -686,7 +685,6 @@ "source": [ "import numpy as np\n", "from drtsans.tubecollection import TubeCollection\n", - "from matplotlib import pyplot as plt\n", "\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", @@ -798,7 +796,6 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", - "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] diff --git a/notebooks/gpsans/gpsans_reduction_1config.ipynb b/notebooks/gpsans/gpsans_reduction_1config.ipynb index 51bd594c0..72c8a6a8d 100644 --- a/notebooks/gpsans/gpsans_reduction_1config.ipynb +++ b/notebooks/gpsans/gpsans_reduction_1config.ipynb @@ -52,7 +52,7 @@ "from pprint import pprint as pretty_print\n", "import time\n", "from drtsans.mono.gpsans import (load_all_files, reduce_single_configuration, plot_reduction_output,\n", - " reduction_parameters, update_reduction_parameters, validate_reduction_parameters)" + " reduction_parameters, update_reduction_parameters)" ] }, { @@ -194,7 +194,7 @@ "from pprint import pprint as pretty_print\n", "import time\n", "from drtsans.mono.gpsans import (load_all_files, reduce_single_configuration, plot_reduction_output,\n", - " reduction_parameters, update_reduction_parameters, validate_reduction_parameters)" + " update_reduction_parameters)" ] }, { diff --git a/notebooks/tubewidth/gpsans_tubewidth.ipynb b/notebooks/tubewidth/gpsans_tubewidth.ipynb index 6cb3fb264..f079523f9 100644 --- a/notebooks/tubewidth/gpsans_tubewidth.ipynb +++ b/notebooks/tubewidth/gpsans_tubewidth.ipynb @@ -14,8 +14,6 @@ "outputs": [], "source": [ "# Standard imports\n", - "import os\n", - "import sys\n", "import time\n", "\n", "# Third-party packages\n", @@ -26,10 +24,9 @@ "# Mantid imports\n", "from mantid.simpleapi import LoadEventNexus, Rebin, CreateWorkspace\n", "from mantid.api import mtd\n", - "from mantid import plots\n", "\n", "# drtsans imports\n", - "from drtsans.pixel_calibration import calculate_apparent_tube_width, load_calibration, as_intensities\n", + "from drtsans.pixel_calibration import calculate_apparent_tube_width, load_calibration\n", "from drtsans.plots import plot_detector\n", "from drtsans.tubecollection import TubeCollection" ] diff --git a/scripts/common_utils.py b/scripts/common_utils.py index 417ee71b1..c1fb3acfb 100644 --- a/scripts/common_utils.py +++ b/scripts/common_utils.py @@ -1,4 +1,5 @@ """ Common utility functions for all SANS """ + import os import numpy as np import matplotlib.pyplot as plt diff --git a/scripts/examples/plot_wavelength.py b/scripts/examples/plot_wavelength.py index d5f954029..a3a12e01f 100644 --- a/scripts/examples/plot_wavelength.py +++ b/scripts/examples/plot_wavelength.py @@ -1,6 +1,7 @@ """ Example script to plot the before and after k-correction data for a given slice and frame. 
""" + import glob import os from pathlib import Path diff --git a/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py b/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py index 4ffeb5ffb..6bbf07727 100644 --- a/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py +++ b/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py @@ -8,6 +8,7 @@ """ + import os import warnings from drtsans.mono.gpsans.prepare_sensitivities_correction import SpiceRun diff --git a/scripts/prepare_sensitivities_biosans.py b/scripts/prepare_sensitivities_biosans.py index cbae0a550..6be10acb3 100644 --- a/scripts/prepare_sensitivities_biosans.py +++ b/scripts/prepare_sensitivities_biosans.py @@ -1,6 +1,7 @@ """ Sensitivities preparation script for Bio-SANS (CG3) """ + from drtsans.mono.biosans.prepare_sensitivities_correction import PrepareSensitivityCorrection import os diff --git a/scripts/prepare_sensitivities_gpsans.py b/scripts/prepare_sensitivities_gpsans.py index 3d7d9c2c4..d605d0a3f 100644 --- a/scripts/prepare_sensitivities_gpsans.py +++ b/scripts/prepare_sensitivities_gpsans.py @@ -1,6 +1,7 @@ """ Sensitivities preparation script for GP-SANS (CG2) """ + from drtsans.prepare_sensivities_correction import PrepareSensitivityCorrection diff --git a/scripts/test_help/biosans_synthetic_sensitivity_dataset.py b/scripts/test_help/biosans_synthetic_sensitivity_dataset.py index dc6a31aa6..4ee0293f5 100644 --- a/scripts/test_help/biosans_synthetic_sensitivity_dataset.py +++ b/scripts/test_help/biosans_synthetic_sensitivity_dataset.py @@ -9,6 +9,7 @@ the Midrange Detector. See Fixture ``biosans_synthetic_sensitivity_dataset`` for detailed use """ + # local imports from drtsans.load import __monitor_counts from drtsans.mono.biosans.simulated_intensities import clone_component_intensities, insert_midrange_detector diff --git a/src/drtsans/absolute_units.py b/src/drtsans/absolute_units.py index 5043e56cd..db704ae51 100644 --- a/src/drtsans/absolute_units.py +++ b/src/drtsans/absolute_units.py @@ -3,6 +3,7 @@ Divide Multiply """ + from mantid.simpleapi import DeleteWorkspace, Divide, Multiply, mtd from mantid.dataobjects import WorkspaceSingleValue diff --git a/src/drtsans/frame_mode.py b/src/drtsans/frame_mode.py index 3e1a1ffac..23074f7c8 100644 --- a/src/drtsans/frame_mode.py +++ b/src/drtsans/frame_mode.py @@ -5,5 +5,6 @@ class FrameMode(Enum): r""" Selects if instrument operating in frame-skipping mode """ + not_skip = 0 skip = 1 diff --git a/src/drtsans/instruments.py b/src/drtsans/instruments.py index 268d09918..50812caa1 100644 --- a/src/drtsans/instruments.py +++ b/src/drtsans/instruments.py @@ -224,7 +224,7 @@ def _empty_download(filepath): idf = os.path.join(str(output_directory), idf_xml) url = f"https://raw.githubusercontent.com/mantidproject/mantid/main/instrument/{idf_xml}" - result = subprocess.run(f"curl -o {idf} {url}", shell=True, capture_output=True, text=True) + result = subprocess.run(f"curl -o {idf} {url}", shell=True, capture_output=True, text=True, check=False) if result.returncode == 0 and not _empty_download(idf): return idf else: diff --git a/src/drtsans/momentum_transfer.py b/src/drtsans/momentum_transfer.py index d8c24c337..3edd67744 100644 --- a/src/drtsans/momentum_transfer.py +++ b/src/drtsans/momentum_transfer.py @@ -487,14 +487,16 @@ def pixel_info(input_workspace): number_spectra = ws.getNumberHistograms() info = [ - [np.nan, np.nan, np.nan, False] - if _masked_or_monitor(spectrum_info, i) - else [ - 
spectrum_info.twoTheta(i), - spectrum_info.azimuthal(i), - spectrum_info.l2(i), - True, - ] + ( + [np.nan, np.nan, np.nan, False] + if _masked_or_monitor(spectrum_info, i) + else [ + spectrum_info.twoTheta(i), + spectrum_info.azimuthal(i), + spectrum_info.l2(i), + True, + ] + ) for i in range(number_spectra) ] info = np.array(info) diff --git a/src/drtsans/mono/biosans/api.py b/src/drtsans/mono/biosans/api.py index d888f1b5a..e2c8ecedf 100644 --- a/src/drtsans/mono/biosans/api.py +++ b/src/drtsans/mono/biosans/api.py @@ -1,4 +1,5 @@ """ BIOSANS API """ + # local imports import drtsans from drtsans import getWedgeSelection, subtract_background, NoDataProcessedError diff --git a/src/drtsans/mono/dark_current.py b/src/drtsans/mono/dark_current.py index 95e8e6378..e9862152d 100644 --- a/src/drtsans/mono/dark_current.py +++ b/src/drtsans/mono/dark_current.py @@ -4,6 +4,7 @@ Integration Scale """ + from mantid.simpleapi import Minus, mtd, DeleteWorkspace, Scale, Integration r""" links to drtsans imports diff --git a/src/drtsans/mono/gpsans/api.py b/src/drtsans/mono/gpsans/api.py index 953b447a0..1ca44da52 100644 --- a/src/drtsans/mono/gpsans/api.py +++ b/src/drtsans/mono/gpsans/api.py @@ -1,4 +1,5 @@ """ GPSANS API """ + # standard imports from collections import namedtuple import copy diff --git a/src/drtsans/mono/normalization.py b/src/drtsans/mono/normalization.py index 5846a1a96..fdc417131 100644 --- a/src/drtsans/mono/normalization.py +++ b/src/drtsans/mono/normalization.py @@ -3,6 +3,7 @@ CreateSingleValuedWorkspace Divide """ + from mantid.simpleapi import CreateSingleValuedWorkspace, Divide from mantid.api import mtd diff --git a/src/drtsans/mono/spice_xml_parser.py b/src/drtsans/mono/spice_xml_parser.py index dd7cdb0da..817e416de 100644 --- a/src/drtsans/mono/spice_xml_parser.py +++ b/src/drtsans/mono/spice_xml_parser.py @@ -1,6 +1,7 @@ """ Module contains class and method to parse SPICE SANS XML file for DAS sample logs. """ + from xml.etree import ElementTree import numpy as np diff --git a/src/drtsans/pixel_calibration.py b/src/drtsans/pixel_calibration.py index 5d83528c3..53c0ca60a 100644 --- a/src/drtsans/pixel_calibration.py +++ b/src/drtsans/pixel_calibration.py @@ -5,8 +5,6 @@ import numpy as np import numexpr import os -import stat -import sys import warnings @@ -99,6 +97,7 @@ class CalibrationNotFound(Exception): class CalType(enum.Enum): r"""Enumerate the possible types of pixel calibrations""" + BARSCAN = "BARSCAN" TUBEWIDTH = "TUBEWIDTH" @@ -206,9 +205,9 @@ def _validate_symbols(formula): When the formula fails to contain symbols '{y}' and '{tube}'. """ if "{y}" not in formula: - raise ValueError(f'Formula does not contain "{{y}}", e.g. formula = "565-{{y}}+0.008*(191-{{tube}})"') + raise ValueError('Formula does not contain "{y}", e.g. formula = "565-{y}+0.008*(191-{tube})"') if "{tube}" not in formula: - warnings.warn(f'Formula does not contain "{{tube}}", e.g. formula = "565-{{y}}+0.008*(191-{{tube}})"') + warnings.warn('Formula does not contain "{tube}", e.g. 
formula = "565-{y}+0.008*(191-{tube})"') formula += " + 0.0 * {tube}" return formula @@ -919,10 +918,9 @@ def _consecutive_true_values(values, how_many, reverse=False, raise_message=None if truth_array[i : i + how_many] == pattern: return len(values) - i - 1 if reverse else i # raise an error if the pattern is not found - else: - if raise_message is not None: - raise IndexError(raise_message) - return INCORRECT_PIXEL_ASSIGNMENT # signal for non-identified value + if raise_message is not None: + raise IndexError(raise_message) + return INCORRECT_PIXEL_ASSIGNMENT # signal for non-identified value @namedtuplefy diff --git a/src/drtsans/sensitivity.py b/src/drtsans/sensitivity.py index e769e9930..9008f5723 100644 --- a/src/drtsans/sensitivity.py +++ b/src/drtsans/sensitivity.py @@ -1,5 +1,4 @@ import os -import numpy as np from drtsans.path import exists as path_exists r""" @@ -18,16 +17,12 @@ from mantid.simpleapi import ( mtd, CloneWorkspace, - CalculateEfficiency, DeleteWorkspace, Divide, LoadNexusProcessed, MaskDetectors, MaskDetectorsIf, ReplaceSpecialValues, - SaveNexusProcessed, - Integration, - CreateWorkspace, ) __all__ = ["load_sensitivity_workspace", "apply_sensitivity_correction"] diff --git a/src/drtsans/sensitivity_correction_moving_detectors.py b/src/drtsans/sensitivity_correction_moving_detectors.py index 3faf2b27d..e13bc452e 100644 --- a/src/drtsans/sensitivity_correction_moving_detectors.py +++ b/src/drtsans/sensitivity_correction_moving_detectors.py @@ -1,6 +1,7 @@ """ Module for algorithms to prepare sensitivity for instrument with moving detector """ + import numpy as np from drtsans.mask_utils import circular_mask_from_beam_center, apply_mask import drtsans.mono.gpsans as gp diff --git a/src/drtsans/tof/eqsans/cfg.py b/src/drtsans/tof/eqsans/cfg.py index 1d693a6be..a253479ca 100644 --- a/src/drtsans/tof/eqsans/cfg.py +++ b/src/drtsans/tof/eqsans/cfg.py @@ -1,6 +1,7 @@ """ Reader for EQSANS configuration files in the old format """ + import os import re from copy import deepcopy diff --git a/src/drtsans/tof/eqsans/normalization.py b/src/drtsans/tof/eqsans/normalization.py index ea0e7d4ae..8c7c99be1 100644 --- a/src/drtsans/tof/eqsans/normalization.py +++ b/src/drtsans/tof/eqsans/normalization.py @@ -13,6 +13,7 @@ Scale SplineInterpolation """ + from mantid.simpleapi import ( CloneWorkspace, ConvertToHistogram, diff --git a/tests/examples/BIOSANS.ipynb b/tests/examples/BIOSANS.ipynb index 7ed642705..fc3c7a048 100644 --- a/tests/examples/BIOSANS.ipynb +++ b/tests/examples/BIOSANS.ipynb @@ -13,9 +13,7 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import numpy as np\n", - "import scipy.stats\n", "import matplotlib.pyplot as plt\n", "%matplotlib notebook\n", "\n", @@ -30,7 +28,6 @@ "metadata": {}, "outputs": [], "source": [ - "from mantid.simpleapi import mtd\n", "from mantid import simpleapi as api\n", "#from reduction_workflow.instruments.sans.sns_command_interface import *" ] diff --git a/tests/examples/EQSANS_porasil.ipynb b/tests/examples/EQSANS_porasil.ipynb index e9f0f425d..1f4ba214f 100644 --- a/tests/examples/EQSANS_porasil.ipynb +++ b/tests/examples/EQSANS_porasil.ipynb @@ -15,7 +15,6 @@ "source": [ "import os\n", "import numpy as np\n", - "import scipy.stats\n", "import warnings\n", "warnings.filterwarnings('ignore', module='numpy')\n", "warnings.filterwarnings('ignore')\n", @@ -30,7 +29,6 @@ "metadata": {}, "outputs": [], "source": [ - "from mantid.simpleapi import mtd\n", "from mantid import simpleapi as api" ] }, diff --git 
a/tests/examples/EQSANS_porasil.py b/tests/examples/EQSANS_porasil.py index ad88f8f7b..a7413cc91 100644 --- a/tests/examples/EQSANS_porasil.py +++ b/tests/examples/EQSANS_porasil.py @@ -1,6 +1,7 @@ """ EQSANS example for the legacy reduction """ + # flake8: noqa import os diff --git a/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py b/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py index d2eeab2a8..14593a6fa 100644 --- a/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py +++ b/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py @@ -1,6 +1,7 @@ """ Integration test to create event nexus file """ + import pytest import numpy as np import os diff --git a/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py b/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py index d23df6de0..0fa60723b 100644 --- a/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py +++ b/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py @@ -2,6 +2,7 @@ Integration test for overwriting instrument geometry related meta data for BIO-SANS (imports alphabetically ordered) """ + # standard imports import json import os diff --git a/tests/integration/drtsans/mono/gpsans/test_create_event_nexus.py b/tests/integration/drtsans/mono/gpsans/test_create_event_nexus.py index 06600a484..6452317c6 100644 --- a/tests/integration/drtsans/mono/gpsans/test_create_event_nexus.py +++ b/tests/integration/drtsans/mono/gpsans/test_create_event_nexus.py @@ -1,6 +1,7 @@ """ Integration test to create event nexus file """ + import pytest import numpy as np import os diff --git a/tests/integration/drtsans/test_auto_wedge.py b/tests/integration/drtsans/test_auto_wedge.py index e6c6b6e93..05e520196 100644 --- a/tests/integration/drtsans/test_auto_wedge.py +++ b/tests/integration/drtsans/test_auto_wedge.py @@ -3,6 +3,7 @@ Much of the spreadsheet is split into smaller tests to aid in verifying the intermediate results """ + import pytest import numpy as np import os diff --git a/tests/integration/drtsans/test_prepare_sensitivities.py b/tests/integration/drtsans/test_prepare_sensitivities.py index d6de92451..f39258257 100644 --- a/tests/integration/drtsans/test_prepare_sensitivities.py +++ b/tests/integration/drtsans/test_prepare_sensitivities.py @@ -1,6 +1,7 @@ """ Test EASANS sensitivities preparation algorithm """ + import pytest from unittest.mock import patch as mock_patch import numpy as np diff --git a/tests/integration/drtsans/tof/eqsans/test_generate_report.py b/tests/integration/drtsans/tof/eqsans/test_generate_report.py index dcdd48444..f287a4f21 100644 --- a/tests/integration/drtsans/tof/eqsans/test_generate_report.py +++ b/tests/integration/drtsans/tof/eqsans/test_generate_report.py @@ -9,7 +9,7 @@ def test_generate_report(datarepo_dir): cmd = ["generate_report", TEST_FILE] - result = subprocess.run(cmd, capture_output=True, text=True) + result = subprocess.run(cmd, capture_output=True, text=True, check=False) pattern = """drtsan version 1.10.2+d20231019 mantid version 6.8.0 diff --git a/tests/integration/drtsans/tof/eqsans/test_integration_api.py b/tests/integration/drtsans/tof/eqsans/test_integration_api.py index 1c1722554..fca2fc722 100644 --- a/tests/integration/drtsans/tof/eqsans/test_integration_api.py +++ b/tests/integration/drtsans/tof/eqsans/test_integration_api.py @@ -1,6 +1,7 @@ """ Test top-level API """ + from os.path import join as pj import pytest from pytest import approx 
diff --git a/tests/unit/drtsans/mono/test_momentum_transfer.py b/tests/unit/drtsans/mono/test_momentum_transfer.py index 03690dce3..6e5bdf78c 100644 --- a/tests/unit/drtsans/mono/test_momentum_transfer.py +++ b/tests/unit/drtsans/mono/test_momentum_transfer.py @@ -53,12 +53,7 @@ def sigma_neutron(wavelength, delta_lambda, Qx, Qy, theta, L1, L2, R1, R2, x3, y sigma_y = (2 * np.pi * np.cos(theta) * np.cos(2 * theta) ** 2 / wavelength / L2) ** 2 sigma_y = ( sigma_y - * ( - (L2 / L1) ** 2 * R1**2 / 4 - + (1 + L2 / L1) ** 2 * R2**2 / 4 - + y3**2 / 12 - + 2 * B**2 * wavelength**4 * r / 3 - ) + * ((L2 / L1) ** 2 * R1**2 / 4 + (1 + L2 / L1) ** 2 * R2**2 / 4 + y3**2 / 12 + 2 * B**2 * wavelength**4 * r / 3) + Qy**2 / 6 * r ) sigma_x = np.sqrt(sigma_x) diff --git a/tests/unit/drtsans/mono/test_sample_detector_offsets.py b/tests/unit/drtsans/mono/test_sample_detector_offsets.py index d272c94e5..1483823a8 100644 --- a/tests/unit/drtsans/mono/test_sample_detector_offsets.py +++ b/tests/unit/drtsans/mono/test_sample_detector_offsets.py @@ -1,6 +1,7 @@ """ Test methods to determine the correct sample and detector positions from meta data and overwriting """ + import os import pytest from mantid.simpleapi import LoadEmptyInstrument diff --git a/tests/unit/drtsans/test_detector.py b/tests/unit/drtsans/test_detector.py index 4688d4dbd..f2f1941ef 100644 --- a/tests/unit/drtsans/test_detector.py +++ b/tests/unit/drtsans/test_detector.py @@ -1,4 +1,3 @@ -import pytest from mantid.simpleapi import LoadEmptyInstrument, MaskBTP from drtsans.detector import Component diff --git a/tests/unit/drtsans/tof/eqsans/test_resolution.py b/tests/unit/drtsans/tof/eqsans/test_resolution.py index fba873b02..5b977f56d 100644 --- a/tests/unit/drtsans/tof/eqsans/test_resolution.py +++ b/tests/unit/drtsans/tof/eqsans/test_resolution.py @@ -1,6 +1,7 @@ """ Test EQSANS resolution """ + import numpy as np from collections import namedtuple from scipy import constants @@ -111,10 +112,7 @@ def sigma_neutron( # dQy sigma_y = (2.0 * np.pi * np.cos(theta) * np.cos(2 * theta) ** 2 / wave_length / L2) ** 2 sigma_y = sigma_y * ( - (L2 / L1) ** 2 * R1**2 / 4 - + (1 + L2 / L1) ** 2 * R2**2 / 4 - + y3**2 / 12 - + B**2 * wave_length**4 * 2 / 3 * r + (L2 / L1) ** 2 * R1**2 / 4 + (1 + L2 / L1) ** 2 * R2**2 / 4 + y3**2 / 12 + B**2 * wave_length**4 * 2 / 3 * r ) sigma_y = np.sqrt( sigma_y + Qy**2 / 12 * (r + (3.9560 * sig_emission) ** 2 / (1000 * wave_length * (s2p + m2s)) ** 2) diff --git a/tests/unit/test_all.py b/tests/unit/test_all.py index 1aa846a05..184e91833 100644 --- a/tests/unit/test_all.py +++ b/tests/unit/test_all.py @@ -4,6 +4,7 @@ See http://xion.io/post/code/python-all-wild-imports.html for more information """ + import pytest From f799ef57da4f6cb1650dd8dd0088a7f90b5ed24f Mon Sep 17 00:00:00 2001 From: "Kevin A. 
Tactac" Date: Tue, 17 Dec 2024 16:42:14 -0500 Subject: [PATCH 03/23] Add testing and packaging files --- .github/dependabot.yml | 11 +++++++ .github/workflows/package.yml | 45 ++++++++++++++++++++++++++++ .github/workflows/test.yml | 56 +++++++++++++++++++++++++++++++++++ 3 files changed, 112 insertions(+) create mode 100644 .github/dependabot.yml create mode 100644 .github/workflows/package.yml create mode 100644 .github/workflows/test.yml diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..5cf92a4da --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "github-actions" # See documentation for possible values + directory: "/" # Workflow files stored in the default location of `.github/workflows` + schedule: + interval: "weekly" diff --git a/.github/workflows/package.yml b/.github/workflows/package.yml new file mode 100644 index 000000000..8ab9c8809 --- /dev/null +++ b/.github/workflows/package.yml @@ -0,0 +1,45 @@ +name: conda packaging and deployment + +on: + workflow_dispatch: + push: + branches: [qa, main] + tags: ['v*'] + +jobs: + linux: + runs-on: ubuntu-latest + defaults: + run: + shell: bash -l {0} + steps: + - uses: actions/checkout@v4 + - uses: conda-incubator/setup-miniconda@v3 + with: + auto-update-conda: true + channels: conda-forge,defaults + use-mamba: true + environment-file: environment.yml + - name: install additional dependencies + run: | + echo "installing additional dependencies from environment_development.yml" + - name: build conda package + run: | + # set up environment + cd conda.recipe + echo "versioningit $(versioningit ../)" + # build the package + VERSION=$(versioningit ../) conda mambabuild --channel conda-forge --output-folder . . 
+          conda verify noarch/drtsans*.tar.bz2
+      - name: upload conda package to anaconda
+        shell: bash -l {0}
+        if: startsWith(github.ref, 'refs/tags/v')
+        env:
+          ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_TOKEN }}
+          IS_RC: ${{ contains(github.ref, 'rc') }}
+        run: |
+          # label is main or rc depending on the tag-name
+          CONDA_LABEL="main"
+          if [ "${IS_RC}" = "true" ]; then CONDA_LABEL="rc"; fi
+          echo pushing ${{ github.ref }} with label $CONDA_LABEL
+          anaconda upload --label $CONDA_LABEL conda.recipe/noarch/drtsans*.tar.bz2
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 000000000..a2d12eab6
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,56 @@
+name: test
+
+on:
+  workflow_dispatch:
+  pull_request:
+  push:
+    branches: [next, qa, main]
+    tags: ['v*']
+
+jobs:
+  linux:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        shell: bash -l {0}
+    steps:
+      - uses: actions/checkout@v4
+      - uses: conda-incubator/setup-miniconda@v3
+        with:
+          auto-update-conda: true
+          channels: conda-forge,defaults
+          use-mamba: true
+          environment-file: environment.yml
+      - name: install additional dependencies
+        run: |
+          echo "installing additional dependencies if cannot be installed from conda"
+      - name: run unit tests
+        run: |
+          echo "running unit tests"
+          python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/unit/
+          mkdir .coverage.d
+          mv .coverage .coverage.d/unit
+      - name: run integration tests
+        run: |
+          echo "running integration tests"
+          git submodule update --init
+          python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/integration/
+          mv .coverage .coverage.d/integration
+      - name: combine coverage reports
+        run: |
+          coverage combine .coverage.d/
+      - name: upload coverage to codecov
+        uses: codecov/codecov-action@v5
+        if: github.actor != 'dependabot[bot]'
+        with:
+          token: ${{ secrets.CODECOV_TOKEN }}
+      - name: build conda package
+        run: |
+          # test that the conda package builds
+          cd conda.recipe
+          echo "versioningit $(versioningit ../)"
+          # conda channels could have been defined in the conda-incubator, but you can copy/paste the lines
+          # below to build the conda package in your local machine
+          CHANNELS="--channel mantid/label/main --channel conda-forge"
+          VERSION=$(versioningit ../) conda mambabuild $CHANNELS --output-folder . .
+ conda verify noarch/drtsans*.tar.bz2 From a33e8c7e64e843c837ff162a56ee43dcd0c42e97 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Wed, 18 Dec 2024 10:38:45 -0500 Subject: [PATCH 04/23] pre-commit - use ruff instead of black --- .pre-commit-config.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6c12323fc..b8b1bf4c0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,10 +24,7 @@ repos: hooks: - id: ruff args: [--no-cache, --fix, --exit-non-zero-on-fix] - - repo: https://github.com/psf/black - rev: 24.10.0 - hooks: - - id: black + - id: ruff-format - repo: https://github.com/kynan/nbstripout rev: 0.8.1 hooks: From 24c1257c46e964f8bda2cb02eea8d92c3ee75450 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Wed, 18 Dec 2024 11:11:25 -0500 Subject: [PATCH 05/23] Add notebooks to excludes in pyproject.toml --- pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index b97f2748d..befeebadb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,6 +86,9 @@ line-length = 119 [tool.ruff] cache-dir = "/tmp/ruff_cache" line-length = 119 +extend-exclude = ["notebooks"] + +[tool.ruff.lint] # https://beta.ruff.rs/docs/rules/ # suggestions: BLE blind exceptions, I sorts imports # Full pylint PL = PLC, PLE, PLR (~500 issues), PLW. Enable most From 2a0027f4b21d2433945cb9261b1afafe06a48094 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 18 Dec 2024 17:29:27 +0000 Subject: [PATCH 06/23] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- scripts/common_utils.py | 2 +- scripts/generate_report | 1 + ...ns_spice_prepare_sensitivities_template.py | 10 +- scripts/prepare_sensitivities_biosans.py | 2 +- scripts/prepare_sensitivities_gpsans.py | 2 +- src/drtsans/absolute_units.py | 2 +- src/drtsans/files/hdf5_rw.py | 6 +- src/drtsans/geometry.py | 5 +- src/drtsans/instruments.py | 3 +- src/drtsans/iq.py | 10 +- src/drtsans/mask_utils.py | 4 +- src/drtsans/momentum_transfer.py | 5 +- src/drtsans/mono/biosans/api.py | 6 +- src/drtsans/mono/gpsans/api.py | 7 +- src/drtsans/mono/load.py | 3 +- src/drtsans/mono/meta_data.py | 20 ++-- src/drtsans/plots/api.py | 2 +- src/drtsans/reductionlog.py | 5 +- src/drtsans/sensitivity_correction_patch.py | 5 +- tests/conftest.py | 16 +-- tests/examples/BIOSANS.ipynb | 112 ++++++++++-------- tests/examples/EQSANS_porasil.ipynb | 66 ++++++----- tests/examples/EQSANS_porasil.py | 2 +- tests/examples/porasil.py | 4 +- .../drtsans/mono/biosans/test_load.py | 28 ++--- .../mono/gpsans/test_find_beam_center.py | 6 +- .../drtsans/mono/gpsans/test_load.py | 14 +-- tests/integration/drtsans/mono/test_load.py | 9 +- .../drtsans/test_prepare_sensitivities.py | 2 +- .../test_elastic_and_inelastic_corrections.py | 6 +- .../tof/eqsans/test_integration_api.py | 2 +- .../tof/eqsans/test_simulated_events.py | 12 +- .../drtsans/mono/gpsans/test_polarization.py | 4 +- tests/unit/drtsans/test_event_nexus_nodes.py | 4 +- tests/unit/drtsans/test_i_of_q_2d_binning.py | 24 ++-- .../drtsans/tof/eqsans/test_resolution.py | 2 +- .../drtsans/tof/eqsans/test_transmission.py | 8 +- 37 files changed, 216 insertions(+), 205 deletions(-) diff --git a/scripts/common_utils.py b/scripts/common_utils.py index c1fb3acfb..ff93a07a9 100644 --- a/scripts/common_utils.py +++ b/scripts/common_utils.py @@ -1,4 +1,4 @@ -""" Common utility functions for all 
SANS """ +"""Common utility functions for all SANS""" import os import numpy as np diff --git a/scripts/generate_report b/scripts/generate_report index 2114e93f2..63c038b5c 100755 --- a/scripts/generate_report +++ b/scripts/generate_report @@ -3,6 +3,7 @@ """ Script to generate a report from an hdf5 log file """ + import sys import os import numpy as np diff --git a/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py b/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py index 6bbf07727..6f97f07bb 100644 --- a/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py +++ b/scripts/jupyter_notebooks/gpsans_spice_prepare_sensitivities_template.py @@ -1,10 +1,10 @@ """ - SANS sensitivities preparation script +SANS sensitivities preparation script - # goal - 1. implement a universal mask_beam_center(flood_ws, beam_center_mask=None, beam_center_ws=None) - for 3 types of mask - 2. add option for wing/main detector for BIOSANS:w +# goal +1. implement a universal mask_beam_center(flood_ws, beam_center_mask=None, beam_center_ws=None) + for 3 types of mask +2. add option for wing/main detector for BIOSANS:w """ diff --git a/scripts/prepare_sensitivities_biosans.py b/scripts/prepare_sensitivities_biosans.py index 6be10acb3..2a1a9abb1 100644 --- a/scripts/prepare_sensitivities_biosans.py +++ b/scripts/prepare_sensitivities_biosans.py @@ -1,5 +1,5 @@ """ - Sensitivities preparation script for Bio-SANS (CG3) +Sensitivities preparation script for Bio-SANS (CG3) """ from drtsans.mono.biosans.prepare_sensitivities_correction import PrepareSensitivityCorrection diff --git a/scripts/prepare_sensitivities_gpsans.py b/scripts/prepare_sensitivities_gpsans.py index d605d0a3f..1970eeb11 100644 --- a/scripts/prepare_sensitivities_gpsans.py +++ b/scripts/prepare_sensitivities_gpsans.py @@ -1,5 +1,5 @@ """ - Sensitivities preparation script for GP-SANS (CG2) +Sensitivities preparation script for GP-SANS (CG2) """ from drtsans.prepare_sensivities_correction import PrepareSensitivityCorrection diff --git a/src/drtsans/absolute_units.py b/src/drtsans/absolute_units.py index db704ae51..2fb9f6c8a 100644 --- a/src/drtsans/absolute_units.py +++ b/src/drtsans/absolute_units.py @@ -1,4 +1,4 @@ -r""" Links to Mantid algorithms +r"""Links to Mantid algorithms DeleteWorkspace Divide Multiply diff --git a/src/drtsans/files/hdf5_rw.py b/src/drtsans/files/hdf5_rw.py index 3fa185b97..2695f9613 100644 --- a/src/drtsans/files/hdf5_rw.py +++ b/src/drtsans/files/hdf5_rw.py @@ -89,16 +89,14 @@ def match(self, other_node): # compare attributes if set(self._attributes.keys()) != set(other_node.attributes.keys()): print( - "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" - "".format( + "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" "".format( self.name, set(self._attributes.keys()) - set(other_node.attributes.keys()), set(other_node.attributes.keys()) - set(self._attributes.keys()), ) ) raise KeyError( - "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" - "".format( + "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" "".format( self.name, set(self._attributes.keys()) - set(other_node.attributes.keys()), set(other_node.attributes.keys()) - set(self._attributes.keys()), diff --git a/src/drtsans/geometry.py b/src/drtsans/geometry.py index 4e24d345f..d3816f560 100644 --- a/src/drtsans/geometry.py +++ b/src/drtsans/geometry.py @@ -836,8 +836,9 @@ def 
translate_detector_by_z(input_workspace, z=None, relative=True): update_log = True if (not relative) or (z != 0.0): logger.debug( - "Moving detector along Z = {} is relative = {} to component {}" - "".format(z, relative, main_detector_name(input_workspace)) + "Moving detector along Z = {} is relative = {} to component {}" "".format( + z, relative, main_detector_name(input_workspace) + ) ) MoveInstrumentComponent( diff --git a/src/drtsans/instruments.py b/src/drtsans/instruments.py index 50812caa1..7bcb5fbde 100644 --- a/src/drtsans/instruments.py +++ b/src/drtsans/instruments.py @@ -328,7 +328,8 @@ def copy_to_newest_instrument( target.getAxis(0).setUnit(origin_unit) target.setYUnit(origin.YUnit()) MergeRuns( - InputWorkspaces=[target_workspace, input_workspace], OutputWorkspace=target_workspace # order is necessary + InputWorkspaces=[target_workspace, input_workspace], + OutputWorkspace=target_workspace, # order is necessary ) # Move components to the positions they have in input_workspace by reading their positions # in the logs. This is implicitly done when invoking algorithm LoadInstrument. diff --git a/src/drtsans/iq.py b/src/drtsans/iq.py index c0258644b..cd03581c6 100644 --- a/src/drtsans/iq.py +++ b/src/drtsans/iq.py @@ -117,14 +117,16 @@ def valid_wedge(min_angle, max_angle) -> List[Tuple[float, float]]: if diff < 180.0: return [(min_angle, max_angle)] raise ValueError( - "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} < 180" - "".format(max_angle, min_angle, diff) + "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} < 180" "".format( + max_angle, min_angle, diff + ) ) diff = min_angle - max_angle if diff <= 180: raise ValueError( - "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} <= 180" - "".format(min_angle, max_angle, diff) + "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} <= 180" "".format( + min_angle, max_angle, diff + ) ) return [(min_angle, 270.1), (-90.1, max_angle)] diff --git a/src/drtsans/mask_utils.py b/src/drtsans/mask_utils.py index f0f655879..ee29e50ae 100644 --- a/src/drtsans/mask_utils.py +++ b/src/drtsans/mask_utils.py @@ -258,9 +258,7 @@ def circular_mask_from_beam_center(input_workspace, radius, unit="mm"): - """.format( - r - ) + """.format(r) det_ids = FindDetectorsInShape(Workspace=input_workspace, ShapeXML=cylinder) return det_ids diff --git a/src/drtsans/momentum_transfer.py b/src/drtsans/momentum_transfer.py index 3edd67744..b0ed1d1a9 100644 --- a/src/drtsans/momentum_transfer.py +++ b/src/drtsans/momentum_transfer.py @@ -83,8 +83,9 @@ def convert_to_q(ws, mode, resolution_function=None, **kwargs): wsh = mtd[str(ws)] if wsh.getAxis(0).getUnit().unitID() != "Wavelength": raise RuntimeError( - "Input workspace {} for calculate Q and resolution must be in unit Wavelength but not {}" - "".format(wsh, wsh.getAxis(0).getUnit().unitID()) + "Input workspace {} for calculate Q and resolution must be in unit Wavelength but not {}" "".format( + wsh, wsh.getAxis(0).getUnit().unitID() + ) ) # switch according to mode diff --git a/src/drtsans/mono/biosans/api.py b/src/drtsans/mono/biosans/api.py index e2c8ecedf..ec7e25deb 100644 --- a/src/drtsans/mono/biosans/api.py +++ b/src/drtsans/mono/biosans/api.py @@ -1,4 +1,4 @@ -""" BIOSANS API """ +"""BIOSANS API""" # local imports import drtsans @@ -2129,9 +2129,7 @@ def file_has_midrange_detector(sample: str, instrument_name: str, ipts: str, dir instrument=instrument_name, ipts=ipts, directory=directory, - ).split( - "," - )[0] + ).split(",")[0] 
out_ws_name = mtd.unique_hidden_name() diff --git a/src/drtsans/mono/gpsans/api.py b/src/drtsans/mono/gpsans/api.py index 1ca44da52..7b669bd41 100644 --- a/src/drtsans/mono/gpsans/api.py +++ b/src/drtsans/mono/gpsans/api.py @@ -1,4 +1,4 @@ -""" GPSANS API """ +"""GPSANS API""" # standard imports from collections import namedtuple @@ -321,8 +321,9 @@ def load_all_files( smearing_pixel_size_y=smearing_pixel_size_y_dict[meta_data.SAMPLE], ) logger.information( - "[META] Wavelength range is from {} to {}" - "".format(mtd[ws_name].readX(0)[0], mtd[ws_name].readX(0)[1]) + "[META] Wavelength range is from {} to {}" "".format( + mtd[ws_name].readX(0)[0], mtd[ws_name].readX(0)[1] + ) ) # Apply mask for btp_params in default_mask: diff --git a/src/drtsans/mono/load.py b/src/drtsans/mono/load.py index cf60ca72f..18a5f540c 100644 --- a/src/drtsans/mono/load.py +++ b/src/drtsans/mono/load.py @@ -346,8 +346,7 @@ def set_sample_detector_position( # Check current instrument setup and meta data (sample logs) logger.notice( - "{} Sample to detector distance = {} (calculated) vs {} (meta) mm" - "".format( + "{} Sample to detector distance = {} (calculated) vs {} (meta) mm" "".format( str(ws), sample_detector_distance(ws, search_logs=False), sample_detector_distance(ws, search_logs=True), diff --git a/src/drtsans/mono/meta_data.py b/src/drtsans/mono/meta_data.py index 0442b9035..3b81c61f0 100644 --- a/src/drtsans/mono/meta_data.py +++ b/src/drtsans/mono/meta_data.py @@ -149,8 +149,9 @@ def _parse_new_meta_data_json(reduction_input, meta_name, unit_conversion_factor except KeyError as key_error: # Required value cannot be found raise KeyError( - "JSON file shall have key as configuration:{}:{}. Error message: {}" - "".format(meta_name, run_type, key_error) + "JSON file shall have key as configuration:{}:{}. Error message: {}" "".format( + meta_name, run_type, key_error + ) ) meta_value_dict[SAMPLE] = overwrite_value @@ -179,14 +180,16 @@ def _parse_new_meta_data_json(reduction_input, meta_name, unit_conversion_factor except ValueError as value_error: # Overwritten value error raise RuntimeError( - "JSON value of key configuration:{}:{} has a value error. Error message: {}" - "".format(meta_name, run_type, value_error) + "JSON value of key configuration:{}:{} has a value error. Error message: {}" "".format( + meta_name, run_type, value_error + ) ) except KeyError as key_error: # Required value cannot be found raise KeyError( - "JSON file shall have key as configuration:{}:{}. Error message: {}" - "".format(meta_name, run_type, key_error) + "JSON file shall have key as configuration:{}:{}. 
Error message: {}" "".format( + meta_name, run_type, key_error + ) ) @@ -384,8 +387,9 @@ def get_sample_detector_offset( # read sample log for SampleToSi and convert to meter from mm sample_to_si = sample_logs.find_log_with_units(sample_si_meta_name, "mm") * 1e-3 logger.notice( - "[META INIT] User SSD = {}, SWD = {}," - "".format(overwrite_sample_detector_distance, overwrite_sample_si_distance) + "[META INIT] User SSD = {}, SWD = {}," "".format( + overwrite_sample_detector_distance, overwrite_sample_si_distance + ) ) logger.notice("[META] EPICS Sample to Si = {} meter".format(sample_to_si)) logger.notice( diff --git a/src/drtsans/plots/api.py b/src/drtsans/plots/api.py index 6221967db..23cab18d9 100644 --- a/src/drtsans/plots/api.py +++ b/src/drtsans/plots/api.py @@ -120,7 +120,7 @@ def _q_label(backend: str, subscript=""): if backend == Backend.MATPLOTLIB: return "$" + label + r" (\AA^{-1})$" else: # mpld3 - return label + " (1/{})".format("\u212B") + return label + " (1/{})".format("\u212b") def plot_IQmod(workspaces, filename, loglog=True, backend: str = "d3", errorbar_kwargs={}, **kwargs): diff --git a/src/drtsans/reductionlog.py b/src/drtsans/reductionlog.py index 7cf2e1c6f..17d3e8979 100644 --- a/src/drtsans/reductionlog.py +++ b/src/drtsans/reductionlog.py @@ -405,8 +405,9 @@ def savereductionlog(filename="", detectordata=None, **kwargs): for _slice_name in detectordata.keys(): if type(detectordata[_slice_name]) is not dict: raise RuntimeError( - "detectordata value has the wrong type. It should be a dictionary " - "and not a {}".format(type(detectordata[_slice_name])) + "detectordata value has the wrong type. It should be a dictionary " "and not a {}".format( + type(detectordata[_slice_name]) + ) ) for _detector_name in detectordata[_slice_name].keys(): diff --git a/src/drtsans/sensitivity_correction_patch.py b/src/drtsans/sensitivity_correction_patch.py index 5b79aee58..1b9c038f3 100644 --- a/src/drtsans/sensitivity_correction_patch.py +++ b/src/drtsans/sensitivity_correction_patch.py @@ -151,8 +151,9 @@ def calculate_sensitivity_correction( # This shall be an option later if len(xx) < min_detectors_per_tube: logger.error( - "Skipping tube with indices {} with {} non-masked value. Too many " - "masked or dead pixels.".format(j, len(xx)) + "Skipping tube with indices {} with {} non-masked value. 
Too many " "masked or dead pixels.".format( + j, len(xx) + ) ) continue diff --git a/tests/conftest.py b/tests/conftest.py index d79358cb6..c1650849a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -434,12 +434,8 @@ def porasil_slice1m(reference_dir): dbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 direct beam transmission sample dbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 direct beam transmission empty b=pjoin(reference_dir.eqsans, "EQSANS_92163.nxs.h5"), # background - bdbts=pjoin( - reference_dir.eqsans, "EQSANS_92161.nxs.h5" - ), # noqa: E501 background direct beam transmission sample - bdbte=pjoin( - reference_dir.eqsans, "EQSANS_92160.nxs.h5" - ), # noqa: E501 background_direct_beam_transmission_empty + bdbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 background direct beam transmission sample + bdbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 background_direct_beam_transmission_empty ) lds = dict( @@ -912,9 +908,7 @@ def n_pack(req_params): {locations_str} - """.format( - locations_str="\n".join(locations) - ) + """.format(locations_str="\n".join(locations)) # # Generate the n-pack type x_start = -(number_tubes - 1) * (tube_center_spacing / 2.0) @@ -928,9 +922,7 @@ def n_pack(req_params): {locations_str} - """.format( - locations_str="\n".join(locations) - ) + """.format(locations_str="\n".join(locations)) # # Put everything together geometry_params = { diff --git a/tests/examples/BIOSANS.ipynb b/tests/examples/BIOSANS.ipynb index fc3c7a048..acc9ab2ad 100644 --- a/tests/examples/BIOSANS.ipynb +++ b/tests/examples/BIOSANS.ipynb @@ -15,11 +15,13 @@ "source": [ "import numpy as np\n", "import matplotlib.pyplot as plt\n", + "\n", "%matplotlib notebook\n", "\n", "import warnings\n", - "warnings.filterwarnings('ignore', module='numpy')\n", - "warnings.filterwarnings('ignore')" + "\n", + "warnings.filterwarnings(\"ignore\", module=\"numpy\")\n", + "warnings.filterwarnings(\"ignore\")" ] }, { @@ -29,7 +31,7 @@ "outputs": [], "source": [ "from mantid import simpleapi as api\n", - "#from reduction_workflow.instruments.sans.sns_command_interface import *" + "# from reduction_workflow.instruments.sans.sns_command_interface import *" ] }, { @@ -60,12 +62,13 @@ "metadata": {}, "outputs": [], "source": [ - "config = dict(sample_offset=0, # 340\n", - " detector_offset=0,\n", - " dark_current=\"/HFIR/CG3/IPTS-23782/nexus/CG3_795.nxs.h5\",\n", - " flood=\"/HFIR/CG3/IPTS-23782/nexus/CG3_821.nxs.h5\",\n", - " prepared_flood = '/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5',\n", - " )\n", + "config = dict(\n", + " sample_offset=0, # 340\n", + " detector_offset=0,\n", + " dark_current=\"/HFIR/CG3/IPTS-23782/nexus/CG3_795.nxs.h5\",\n", + " flood=\"/HFIR/CG3/IPTS-23782/nexus/CG3_821.nxs.h5\",\n", + " prepared_flood=\"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\",\n", + ")\n", "\n", "mask_file = \"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_104088_mask.xml\"\n", "mask_nxs = \"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_104088_mask.nxs\"" @@ -80,14 +83,16 @@ "def load_data(filename, mask_wing=True, center_x=None, center_y=None, center_y_wing=None, output_workspace=None):\n", " ws = api.LoadEventNexus(Filename=filename, OutputWorkspace=output_workspace)\n", " ws = api.HFIRSANS2Wavelength(ws, OutputWorkspace=output_workspace)\n", - " api.LoadInstrument(Workspace=ws,\n", - " 
Filename='/SNS/users/3y9/Desktop/BIOSANS-commisioning/BIOSANS_Definition_2019_2100.xml',\n", - " RewriteSpectraMap='True')\n", + " api.LoadInstrument(\n", + " Workspace=ws,\n", + " Filename=\"/SNS/users/3y9/Desktop/BIOSANS-commisioning/BIOSANS_Definition_2019_2100.xml\",\n", + " RewriteSpectraMap=\"True\",\n", + " )\n", "\n", " # Mask wing\n", " if mask_wing:\n", - " api.MaskDetectors(ws, ComponentList='wing_detector')\n", - " \n", + " api.MaskDetectors(ws, ComponentList=\"wing_detector\")\n", + "\n", " if center_x is not None and center_y is not None and center_y_wing is not None:\n", " biosans.center_detector(ws, center_x=center_x, center_y=center_y, center_y_wing=center_y_wing)\n", "\n", @@ -100,7 +105,7 @@ "metadata": {}, "outputs": [], "source": [ - "#ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\")\n", + "# ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\")\n", "def plot_det(ws):\n", " nx = 192\n", " ny = 256\n", @@ -116,12 +121,12 @@ "\n", " print(data.shape)\n", "\n", - " counts = data.reshape((nx,ny,len(wl)))\n", - " d_counts = err.reshape((nx,ny,len(wl)))\n", + " counts = data.reshape((nx, ny, len(wl)))\n", + " d_counts = err.reshape((nx, ny, len(wl)))\n", " _counts = np.sum(counts, axis=2)\n", "\n", " print(counts.shape)\n", - " print(counts[:,:,0].shape)\n", + " print(counts[:, :, 0].shape)\n", " plt.figure()\n", " plt.pcolor(_counts.T)" ] @@ -141,20 +146,20 @@ "source": [ "# The new way of dealing with the beam center is in real space, relative to the center of the detector.\n", "# The EQSANS detector is 192 x 256 pixels, and the pixel sizes are 5.5 mm x 4.3 mm\n", - "x_center, y_center = [-0.01230061, -0.0351677 ]\n", + "x_center, y_center = [-0.01230061, -0.0351677]\n", "beam_center_drt = (-0.012300612863652916, -0.035167700781957245, -0.034614155228279528)\n", "\n", "print(\"Old reduction's beam center in real space: %g %g\" % (x_center, y_center))\n", "\n", - "db_ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_815.nxs.h5\", output_workspace='beam')\n", + "db_ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_815.nxs.h5\", output_workspace=\"beam\")\n", "\n", "center = biosans.find_beam_center(db_ws)\n", "\n", "print(\"Beam center found: %g %g %g\" % (center[0], center[1], center[2]))\n", "\n", - "config['center_x'] = center[0]\n", - "config['center_y'] = center[1]\n", - "config['center_y_wing'] = center[2]" + "config[\"center_x\"] = center[0]\n", + "config[\"center_y\"] = center[1]\n", + "config[\"center_y_wing\"] = center[2]" ] }, { @@ -170,15 +175,17 @@ "metadata": {}, "outputs": [], "source": [ - "flood_ws = load_data(config['flood'],\n", - " center_x=config['center_x'],\n", - " center_y=config['center_y'],\n", - " center_y_wing=config['center_y_wing'],\n", - " output_workspace='CG3_flood')\n", - "drtsans.calculate_sensitivity_correction(flood_ws, \n", - " filename='/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5',\n", - " output_workspace='CG3_sensitivity')\n", - "config['prepared_flood'] = '/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5'" + "flood_ws = load_data(\n", + " config[\"flood\"],\n", + " center_x=config[\"center_x\"],\n", + " center_y=config[\"center_y\"],\n", + " center_y_wing=config[\"center_y_wing\"],\n", + " output_workspace=\"CG3_flood\",\n", + ")\n", + "drtsans.calculate_sensitivity_correction(\n", + " flood_ws, filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\", output_workspace=\"CG3_sensitivity\"\n", + ")\n", + "config[\"prepared_flood\"] = \"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\"" ] }, { @@ -195,35 +202,39 @@ 
"outputs": [], "source": [ "%%time\n", - "ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\",\n", - " center_x=config['center_x'],\n", - " center_y=config['center_y'],\n", - " center_y_wing=config['center_y_wing'],\n", - " output_workspace='CG3_957')\n", + "ws = load_data(\n", + " \"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\",\n", + " center_x=config[\"center_x\"],\n", + " center_y=config[\"center_y\"],\n", + " center_y_wing=config[\"center_y_wing\"],\n", + " output_workspace=\"CG3_957\",\n", + ")\n", "\n", "# Dark current\n", - "dark_ws = load_data(config['dark_current'],\n", - " center_x=config['center_x'],\n", - " center_y=config['center_y'],\n", - " center_y_wing=config['center_y_wing'],\n", - " output_workspace='CG3_dark')\n", + "dark_ws = load_data(\n", + " config[\"dark_current\"],\n", + " center_x=config[\"center_x\"],\n", + " center_y=config[\"center_y\"],\n", + " center_y_wing=config[\"center_y_wing\"],\n", + " output_workspace=\"CG3_dark\",\n", + ")\n", "biosans.subtract_dark_current(ws, dark_ws)\n", "\n", "# Normalization\n", "biosans.normalize_by_time(ws)\n", "\n", "# Solid angle\n", - "#biosans.solid_angle_correction(ws)\n", + "# biosans.solid_angle_correction(ws)\n", "\n", "# Sensitivity\n", - "drtsans.apply_sensitivity_correction(ws, sensitivity_filename=config['prepared_flood'])\n", + "drtsans.apply_sensitivity_correction(ws, sensitivity_filename=config[\"prepared_flood\"])\n", "\n", "# Transmission\n", "\n", "# Background\n", "\n", "# Save output\n", - "#api.SaveNexus(InputWorkspace=ws, Filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_reduced_957.hf5\")" + "# api.SaveNexus(InputWorkspace=ws, Filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_reduced_957.hf5\")" ] }, { @@ -232,16 +243,17 @@ "metadata": {}, "outputs": [], "source": [ - "BinningParams = namedtuple('BinningParams', 'min max bins')\n", + "BinningParams = namedtuple(\"BinningParams\", \"min max bins\")\n", "\n", - "q_data = drtsans.convert_to_q(ws, mode='scalar')\n", + "q_data = drtsans.convert_to_q(ws, mode=\"scalar\")\n", "\n", - "iq_output = biosans.bin_intensity_into_q1d(q_data, bin_params=BinningParams(min=0.001, max=0.25, bins=200),\n", - " linear_binning=False)#, bin_method=1)\n", + "iq_output = biosans.bin_intensity_into_q1d(\n", + " q_data, bin_params=BinningParams(min=0.001, max=0.25, bins=200), linear_binning=False\n", + ") # , bin_method=1)\n", "\n", "print(iq_output.wavelength)\n", "fig, ax = plt.subplots()\n", - "ax.errorbar(iq_output.mod_q, iq_output.intensity, yerr=iq_output.error, label=\"AgBeh\")\n" + "ax.errorbar(iq_output.mod_q, iq_output.intensity, yerr=iq_output.error, label=\"AgBeh\")" ] }, { diff --git a/tests/examples/EQSANS_porasil.ipynb b/tests/examples/EQSANS_porasil.ipynb index 1f4ba214f..52cd01769 100644 --- a/tests/examples/EQSANS_porasil.ipynb +++ b/tests/examples/EQSANS_porasil.ipynb @@ -16,10 +16,12 @@ "import os\n", "import numpy as np\n", "import warnings\n", - "warnings.filterwarnings('ignore', module='numpy')\n", - "warnings.filterwarnings('ignore')\n", + "\n", + "warnings.filterwarnings(\"ignore\", module=\"numpy\")\n", + "warnings.filterwarnings(\"ignore\")\n", "\n", "import matplotlib.pyplot as plt\n", + "\n", "%matplotlib notebook" ] }, @@ -54,17 +56,17 @@ "metadata": {}, "outputs": [], "source": [ - "config = dict(#mask=\"data/EQSANS_88980_mask.xml\",\n", - " bin_width=0.5,\n", - " low_tof_clip=500,\n", - " high_tof_clip=2000,\n", - " detector_offset=0,\n", - " sample_offset=340,\n", - " flux_method='proton charge',\n", - " 
flux=\"/SNS/EQSANS/shared/instrument_configuration/bl6_flux_at_sample\",\n", - " sensitivity_file_path=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs\",\n", - " dark_current=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/EQSANS_86275.nxs.h5\",\n", - " )\n", + "config = dict( # mask=\"data/EQSANS_88980_mask.xml\",\n", + " bin_width=0.5,\n", + " low_tof_clip=500,\n", + " high_tof_clip=2000,\n", + " detector_offset=0,\n", + " sample_offset=340,\n", + " flux_method=\"proton charge\",\n", + " flux=\"/SNS/EQSANS/shared/instrument_configuration/bl6_flux_at_sample\",\n", + " sensitivity_file_path=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs\",\n", + " dark_current=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/EQSANS_86275.nxs.h5\",\n", + ")\n", "\n", "mask_file = \"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_88980_mask.xml\"" ] @@ -85,16 +87,16 @@ "source": [ "# The new way of dealing with the beam center is in real space, relative to the center of the detector.\n", "# The EQSANS detector is 192 x 256 pixels, and the pixel sizes are 5.5 mm x 4.3 mm\n", - "x_center = -(192/2.0 - 90.93) * 0.0055\n", - "y_center = (256/2.0 - 131.47) * 0.0043\n", + "x_center = -(192 / 2.0 - 90.93) * 0.0055\n", + "y_center = (256 / 2.0 - 131.47) * 0.0043\n", "print(\"Old reduction's beam center in real space: %g %g\" % (x_center, y_center))\n", "\n", "db_ws = eqsans.load_events(\"EQSANS_88973\")\n", "center = eqsans.center_detector(db_ws)\n", "print(\"Beam center found: %g %g\" % (center[0], center[1]))\n", "\n", - "config['x_center'] = x_center\n", - "config['y_center'] = y_center" + "config[\"x_center\"] = x_center\n", + "config[\"y_center\"] = y_center" ] }, { @@ -122,7 +124,7 @@ "if apply_transmission:\n", " ws_tr_sample = eqsans.prepare_data(\"EQSANS_88975\", **config)\n", " ws_tr_direct = eqsans.prepare_data(\"EQSANS_88973\", **config)\n", - " tr_ws = eqsans.calculate_transmission(ws_tr_sample, ws_tr_direct, radius=None, radius_unit='mm')\n", + " tr_ws = eqsans.calculate_transmission(ws_tr_sample, ws_tr_direct, radius=None, radius_unit=\"mm\")\n", " ws = eqsans.apply_transmission_correction(ws, trans_workspace=tr_ws)\n", "\n", "# Background\n", @@ -133,7 +135,7 @@ " ws_tr_back = eqsans.prepare_data(\"EQSANS_88974\", **config)\n", " ws_tr_direct = eqsans.prepare_data(\"EQSANS_88973\", **config)\n", "\n", - " tr_ws = eqsans.calculate_transmission(ws_tr_back, ws_tr_direct, radius=None, radius_unit='mm')\n", + " tr_ws = eqsans.calculate_transmission(ws_tr_back, ws_tr_direct, radius=None, radius_unit=\"mm\")\n", " ws_bck = eqsans.apply_transmission_correction(ws_bck, trans_workspace=tr_ws)\n", "\n", "ws = eqsans.subtract_background(ws, background=ws_bck)\n", @@ -149,7 +151,7 @@ "outputs": [], "source": [ "%%time\n", - "table_ws_list = eqsans.prepare_momentum_transfer(ws, wavelength_binning=[config['bin_width']])" + "table_ws_list = eqsans.prepare_momentum_transfer(ws, wavelength_binning=[config[\"bin_width\"]])" ] }, { @@ -162,7 +164,7 @@ "iq_ws_f1 = eqsans.cal_iq(table_ws_list[0], bins=100, log_binning=True)\n", "\n", "# Save output\n", - "filepath = os.path.join(os.path.expanduser('~'), iq_ws_f1.name()+'.txt')\n", + "filepath = os.path.join(os.path.expanduser(\"~\"), iq_ws_f1.name() + \".txt\")\n", "api.SaveAscii(iq_ws_f1.name(), Filename=filepath, WriteSpectrumID=False, WriteXError=True)" ] }, @@ -176,7 +178,7 @@ "if len(table_ws_list) == 2:\n", " iq_ws_f2 = eqsans.cal_iq(table_ws_list[1], 
bins=150, log_binning=True)\n", "\n", - " filepath = os.path.join(os.path.expanduser('~'), iq_ws_f2.name()+'.txt')\n", + " filepath = os.path.join(os.path.expanduser(\"~\"), iq_ws_f2.name() + \".txt\")\n", " api.SaveAscii(iq_ws_f2.name(), Filename=filepath, WriteSpectrumID=False, WriteXError=True)" ] }, @@ -198,24 +200,24 @@ "source": [ "scale_match = 300000\n", "\n", - "data_dir = os.path.expanduser('~')\n", - "reduced_f1 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame1_iq.txt'), delimiter=',')\n", - "reduced_f2 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame2_iq.txt'), delimiter=',')\n", + "data_dir = os.path.expanduser(\"~\")\n", + "reduced_f1 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame1_iq.txt\"), delimiter=\",\")\n", + "reduced_f2 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame2_iq.txt\"), delimiter=\",\")\n", "\n", "fig, ax = plt.subplots()\n", - "ax.errorbar(reduced_f1.T[0], reduced_f1.T[1]/scale_match, yerr=reduced_f1.T[2]/scale_match, label=\"New Frame #1\")\n", - "ax.errorbar(reduced_f2.T[0], reduced_f2.T[1]/scale_match, yerr=reduced_f2.T[2]/scale_match, label=\"New Frame #2\")\n", + "ax.errorbar(reduced_f1.T[0], reduced_f1.T[1] / scale_match, yerr=reduced_f1.T[2] / scale_match, label=\"New Frame #1\")\n", + "ax.errorbar(reduced_f2.T[0], reduced_f2.T[1] / scale_match, yerr=reduced_f2.T[2] / scale_match, label=\"New Frame #2\")\n", "\n", "# To compare to reference data, execute the EQSANS_porasil.py script on one of the\n", "# analysis computers to create the reference data.\n", - "if os.path.exists(os.path.join(data_dir, 'EQSANS_88980_frame1_iq.txt')):\n", - " ref_f1 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame1_iq_ref.txt'), delimiter=',')\n", - " ref_f2 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame2_iq_ref.txt'), delimiter=',')\n", + "if os.path.exists(os.path.join(data_dir, \"EQSANS_88980_frame1_iq.txt\")):\n", + " ref_f1 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame1_iq_ref.txt\"), delimiter=\",\")\n", + " ref_f2 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame2_iq_ref.txt\"), delimiter=\",\")\n", " ax.errorbar(ref_f1.T[0], ref_f1.T[1], yerr=ref_f1.T[2], label=\"Old Frame #1\")\n", " ax.errorbar(ref_f2.T[0], ref_f2.T[1], yerr=ref_f2.T[2], label=\"Old Frame #2\")\n", "\n", - "ax.set_yscale('log')\n", - "ax.set_xscale('log')\n", + "ax.set_yscale(\"log\")\n", + "ax.set_xscale(\"log\")\n", "ax.legend()" ] } diff --git a/tests/examples/EQSANS_porasil.py b/tests/examples/EQSANS_porasil.py index a7413cc91..d92719c83 100644 --- a/tests/examples/EQSANS_porasil.py +++ b/tests/examples/EQSANS_porasil.py @@ -1,5 +1,5 @@ """ - EQSANS example for the legacy reduction +EQSANS example for the legacy reduction """ # flake8: noqa diff --git a/tests/examples/porasil.py b/tests/examples/porasil.py index 61f27edcd..2baf20e44 100644 --- a/tests/examples/porasil.py +++ b/tests/examples/porasil.py @@ -1,7 +1,7 @@ # flake8: noqa """ - The following is a real-life example of an EQSANS reduction script. - It uses the current Mantid reduction for EQSANS. +The following is a real-life example of an EQSANS reduction script. +It uses the current Mantid reduction for EQSANS. 
""" # EQSANS reduction script # Script automatically generated on Fri Mar 3 12:00:50 2017 diff --git a/tests/integration/drtsans/mono/biosans/test_load.py b/tests/integration/drtsans/mono/biosans/test_load.py index 740d79378..76e70de00 100644 --- a/tests/integration/drtsans/mono/biosans/test_load.py +++ b/tests/integration/drtsans/mono/biosans/test_load.py @@ -65,15 +65,15 @@ def test_load_all_files(has_sns_mount, reference_dir): # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 for ws in [sample_run, beam_center_run, bkgd_run, empty_trans_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx( - -0.12952, 0.000004 - ), "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(-0.12952, 0.000004), ( + "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + ) for ws in [dark_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx( - 0.0000, 0.000004 - ), "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(0.0000, 0.000004), ( + "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + ) # Verify sample to detector distance with default setup: # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 @@ -109,10 +109,10 @@ def test_load_all_files(has_sns_mount, reference_dir): assert wave_length == pytest.approx( 1.23, 1.0e-7 ), "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) - assert wave_length_spread == pytest.approx( - 0.46, 1.0e-7 - ), "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( - ws_index, wave_length_spread + assert wave_length_spread == pytest.approx(0.46, 1.0e-7), ( + "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( + ws_index, wave_length_spread + ) ) for ws_index, ws in enumerate([dark_run]): @@ -123,10 +123,10 @@ def test_load_all_files(has_sns_mount, reference_dir): assert wave_length == pytest.approx( 6.00881338, 1.0e-7 ), "{}-th workspace: wave length {} shall be equal to 6.00881338 angstrom".format(ws_index, wave_length) - assert wave_length_spread == pytest.approx( - 0.1323529411, 1.0e-7 - ), "{}-th workspace: wave length spread {} shall be equal to 0.13235294 angstrom" "".format( - ws_index, wave_length_spread + assert wave_length_spread == pytest.approx(0.1323529411, 1.0e-7), ( + "{}-th workspace: wave length spread {} shall be equal to 0.13235294 angstrom" "".format( + ws_index, wave_length_spread + ) ) # Verify that if some meta-data is changed that it gets applied correctly on reload, use thickness as test diff --git a/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py b/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py index d989ae0ad..cb5c20f11 100644 --- a/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py +++ b/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py @@ -38,9 +38,9 @@ def test_gpsans_find_beam_center(datarepo_dir): center_x, center_y, _ = beam_center beam_center_shift = np.sqrt((center_x - det_center[0]) ** 2 + (center_y - det_center[1]) ** 2) - assert beam_center_shift == pytest.approx( - 0.400, abs=0.007 - ), "Beam center shift {} to {} is beyond" "0.4 +/- 7E-3".format(beam_center, det_center) + assert beam_center_shift == pytest.approx(0.400, abs=0.007), ( + "Beam center 
shift {} to {} is beyond" "0.4 +/- 7E-3".format(beam_center, det_center) + ) # cleanup DeleteWorkspace(beam_center_ws) diff --git a/tests/integration/drtsans/mono/gpsans/test_load.py b/tests/integration/drtsans/mono/gpsans/test_load.py index b01191b91..447de0ceb 100644 --- a/tests/integration/drtsans/mono/gpsans/test_load.py +++ b/tests/integration/drtsans/mono/gpsans/test_load.py @@ -72,9 +72,9 @@ def test_load_all_files(datarepo_dir): # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 for ws in [sample_run, sample_trans_run, bkgd_run, bkgd_trans_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx( - -0.23456, 0.000004 - ), "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(-0.23456, 0.000004), ( + "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + ) # Verify sample to detector distance with default setup: # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 @@ -105,10 +105,10 @@ def test_load_all_files(datarepo_dir): assert wave_length == pytest.approx( 1.23, 1.0e-7 ), "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) - assert wave_length_spread == pytest.approx( - 0.1323529411, 1.0e-7 - ), "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( - ws_index, wave_length_spread + assert wave_length_spread == pytest.approx(0.1323529411, 1.0e-7), ( + "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( + ws_index, wave_length_spread + ) ) # Verify that if some meta-data is changed that it gets applied correctly on reload, use thickness as test diff --git a/tests/integration/drtsans/mono/test_load.py b/tests/integration/drtsans/mono/test_load.py index 013568531..df1e965d5 100644 --- a/tests/integration/drtsans/mono/test_load.py +++ b/tests/integration/drtsans/mono/test_load.py @@ -35,8 +35,7 @@ def test_load_gpsans(datarepo_dir, clean_workspace): print("[TEST INFO] SampleToSi = {} mm".format(logs.find_log_with_units("CG2:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws, unit="m", search_logs=False) print( - "[TEST INFO] Sample to detector distance = {} /{} meter" - "".format( + "[TEST INFO] Sample to detector distance = {} /{} meter" "".format( raw_sample_det_distance, sample_detector_distance(ws, unit="m", log_key="sample_detector_distance", search_logs=True), ) @@ -93,8 +92,7 @@ def test_load_biosans(datarepo_dir, clean_workspace): print("[TEST INFO] (Raw) sampleToSi = {} mm".format(logs.find_log_with_units("CG3:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws) print( - "[TEST INFO] (Raw) sample to detector distance = {} /{} meter" - "".format( + "[TEST INFO] (Raw) sample to detector distance = {} /{} meter" "".format( raw_sample_det_distance, sample_detector_distance(ws, log_key="sample_detector_distance", search_logs=True), ) @@ -270,8 +268,7 @@ def test_load_biosans_overwrite_sdd(datarepo_dir, clean_workspace): print("[TEST INFO] SampleToSi = {} mm".format(logs.find_log_with_units("CG3:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws) print( - "[TEST INFO] Sample to detector distance = {} /{} meter" - "".format( + "[TEST INFO] Sample to detector distance = {} /{} meter" "".format( raw_sample_det_distance, sample_detector_distance(ws, log_key="sample_detector_distance", 
search_logs=True), ) diff --git a/tests/integration/drtsans/test_prepare_sensitivities.py b/tests/integration/drtsans/test_prepare_sensitivities.py index f39258257..24f117537 100644 --- a/tests/integration/drtsans/test_prepare_sensitivities.py +++ b/tests/integration/drtsans/test_prepare_sensitivities.py @@ -1,5 +1,5 @@ """ - Test EASANS sensitivities preparation algorithm +Test EASANS sensitivities preparation algorithm """ import pytest diff --git a/tests/integration/drtsans/tof/eqsans/test_elastic_and_inelastic_corrections.py b/tests/integration/drtsans/tof/eqsans/test_elastic_and_inelastic_corrections.py index 0b99c997d..3e253b1a2 100644 --- a/tests/integration/drtsans/tof/eqsans/test_elastic_and_inelastic_corrections.py +++ b/tests/integration/drtsans/tof/eqsans/test_elastic_and_inelastic_corrections.py @@ -406,9 +406,9 @@ def _run_reduction_and_compare(config, expected_result_basename): # Override common configuration values configuration["configuration"]["outputDir"] = test_dir configuration["dataDirectories"] = os.path.join(datarepo_dir.eqsans, "test_corrections") - configuration["configuration"][ - "darkFileName" - ] = "/bin/true" # so that it will pass the validator, later set to None + configuration["configuration"]["darkFileName"] = ( + "/bin/true" # so that it will pass the validator, later set to None + ) configuration["configuration"]["sensitivityFileName"] = os.path.join( datarepo_dir.eqsans, "test_corrections", "Sensitivity_patched_thinPMMA_4m_129610.nxs" ) diff --git a/tests/integration/drtsans/tof/eqsans/test_integration_api.py b/tests/integration/drtsans/tof/eqsans/test_integration_api.py index fca2fc722..1e9e02886 100644 --- a/tests/integration/drtsans/tof/eqsans/test_integration_api.py +++ b/tests/integration/drtsans/tof/eqsans/test_integration_api.py @@ -1,5 +1,5 @@ """ - Test top-level API +Test top-level API """ from os.path import join as pj diff --git a/tests/integration/drtsans/tof/eqsans/test_simulated_events.py b/tests/integration/drtsans/tof/eqsans/test_simulated_events.py index 73d0ac90a..36c6aab8c 100644 --- a/tests/integration/drtsans/tof/eqsans/test_simulated_events.py +++ b/tests/integration/drtsans/tof/eqsans/test_simulated_events.py @@ -717,8 +717,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"), - sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"), + (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) + (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") @@ -809,8 +809,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"), - sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"), + (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) + (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - 
low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") @@ -915,8 +915,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"), - sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"), + (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) + (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") diff --git a/tests/unit/drtsans/mono/gpsans/test_polarization.py b/tests/unit/drtsans/mono/gpsans/test_polarization.py index d013273db..97215e147 100644 --- a/tests/unit/drtsans/mono/gpsans/test_polarization.py +++ b/tests/unit/drtsans/mono/gpsans/test_polarization.py @@ -46,7 +46,9 @@ def test_half_polarization(temp_workspace_name): # expected results SpinUpExp = CreateSingleValuedWorkspace( - DataValue=10050.100, ErrorValue=103.2046, OutputWorkspace=temp_workspace_name() # was 103.205 + DataValue=10050.100, + ErrorValue=103.2046, + OutputWorkspace=temp_workspace_name(), # was 103.205 ) SpinDownExp = CreateSingleValuedWorkspace( DataValue=8046.0925, ErrorValue=93.2163, OutputWorkspace=temp_workspace_name() diff --git a/tests/unit/drtsans/test_event_nexus_nodes.py b/tests/unit/drtsans/test_event_nexus_nodes.py index 2cbaf213f..526e6d3e2 100644 --- a/tests/unit/drtsans/test_event_nexus_nodes.py +++ b/tests/unit/drtsans/test_event_nexus_nodes.py @@ -32,7 +32,7 @@ def test_create_monitor_node(datarepo_dir): bank9_entry = nexus_h5["/entry/monitor1"] event_indexes = bank9_entry["event_index"][()] event_time_offsets = bank9_entry["event_time_offset"][()] - event_time_zeros = bank9_entry["event_time_zero"][(())] + event_time_zeros = bank9_entry["event_time_zero"][()] run_start_time = bank9_entry["event_time_zero"].attrs["offset"].decode() # check type @@ -74,7 +74,7 @@ def test_create_events_node(datarepo_dir): event_ids = bank9_entry["event_id"][()] event_indexes = bank9_entry["event_index"][()] event_time_offsets = bank9_entry["event_time_offset"][()] - event_time_zeros = bank9_entry["event_time_zero"][(())] + event_time_zeros = bank9_entry["event_time_zero"][()] run_start_time = bank9_entry["event_time_zero"].attrs["offset"].decode() # check type diff --git a/tests/unit/drtsans/test_i_of_q_2d_binning.py b/tests/unit/drtsans/test_i_of_q_2d_binning.py index e78611a8f..ced1791a2 100644 --- a/tests/unit/drtsans/test_i_of_q_2d_binning.py +++ b/tests/unit/drtsans/test_i_of_q_2d_binning.py @@ -67,12 +67,12 @@ def test_2d_bin_no_sub_no_wt(): assert binned_iq_2d.error[1][1] == pytest.approx(4.725815626, abs=1e-8), "sigma I(Qx, Qy) is incorrect" # verify dQx and dQy - assert binned_iq_2d.delta_qx[1][1] == pytest.approx( - 0.00816, abs=1e-5 - ), "dQx {} is incorrect comparing to {}." "".format(binned_iq_2d[2][1][1], 0.00816) - assert binned_iq_2d.delta_qy[1][1] == pytest.approx( - 0.00816, abs=1e-5 - ), "dQy {}is incorrect comparing to {}." "".format(binned_iq_2d[3][1][1], 0.00816) + assert binned_iq_2d.delta_qx[1][1] == pytest.approx(0.00816, abs=1e-5), ( + "dQx {} is incorrect comparing to {}." 
"".format(binned_iq_2d[2][1][1], 0.00816) + ) + assert binned_iq_2d.delta_qy[1][1] == pytest.approx(0.00816, abs=1e-5), ( + "dQy {}is incorrect comparing to {}." "".format(binned_iq_2d[3][1][1], 0.00816) + ) # verify Qx and Qy on off diagonal values # Qx in row 0 shall be all same as qx bin center [1] @@ -159,12 +159,12 @@ def test_2d_bin_no_sub_no_wt_wavelength(): assert binned_iq_2d.error[1][1] == pytest.approx(4.725815626, abs=1e-8), "sigma I(Qx, Qy) is incorrect" # verify dQx and dQy - assert binned_iq_2d.delta_qx[1][1] == pytest.approx( - 0.00816, abs=1e-5 - ), "dQx {} is incorrect comparing to {}." "".format(binned_iq_2d[2][1][1], 0.00816) - assert binned_iq_2d.delta_qy[1][1] == pytest.approx( - 0.00816, abs=1e-5 - ), "dQy {}is incorrect comparing to {}." "".format(binned_iq_2d[3][1][1], 0.00816) + assert binned_iq_2d.delta_qx[1][1] == pytest.approx(0.00816, abs=1e-5), ( + "dQx {} is incorrect comparing to {}." "".format(binned_iq_2d[2][1][1], 0.00816) + ) + assert binned_iq_2d.delta_qy[1][1] == pytest.approx(0.00816, abs=1e-5), ( + "dQy {}is incorrect comparing to {}." "".format(binned_iq_2d[3][1][1], 0.00816) + ) # verify Qx and Qy on off diagonal values # Qx in row 0 shall be all same as qx bin center [1] diff --git a/tests/unit/drtsans/tof/eqsans/test_resolution.py b/tests/unit/drtsans/tof/eqsans/test_resolution.py index 5b977f56d..03a5cc252 100644 --- a/tests/unit/drtsans/tof/eqsans/test_resolution.py +++ b/tests/unit/drtsans/tof/eqsans/test_resolution.py @@ -1,5 +1,5 @@ """ - Test EQSANS resolution +Test EQSANS resolution """ import numpy as np diff --git a/tests/unit/drtsans/tof/eqsans/test_transmission.py b/tests/unit/drtsans/tof/eqsans/test_transmission.py index d1f45674a..3c34375b9 100644 --- a/tests/unit/drtsans/tof/eqsans/test_transmission.py +++ b/tests/unit/drtsans/tof/eqsans/test_transmission.py @@ -82,8 +82,8 @@ def test_fit_raw(trasmission_data, clean_workspace): """ # Non-skip mode fitting_results = fit_raw_transmission(trasmission_data.raw, output_workspace=mtd.unique_hidden_name()) - clean_workspace(fitting_results.transmission), - clean_workspace(fitting_results.lead_transmission), + (clean_workspace(fitting_results.transmission),) + (clean_workspace(fitting_results.lead_transmission),) clean_workspace(fitting_results.lead_mantid_fit.OutputWorkspace) clean_workspace(fitting_results.lead_mantid_fit.OutputNormalisedCovarianceMatrix) clean_workspace(fitting_results.lead_mantid_fit.OutputParameters) @@ -91,8 +91,8 @@ def test_fit_raw(trasmission_data, clean_workspace): # Frame-skipping mode fitting_results = fit_raw_transmission(trasmission_data.raw_skip, output_workspace=mtd.unique_hidden_name()) - clean_workspace(fitting_results.transmission), - clean_workspace(fitting_results.lead_transmission), + (clean_workspace(fitting_results.transmission),) + (clean_workspace(fitting_results.lead_transmission),) clean_workspace(fitting_results.lead_mantid_fit.OutputWorkspace) clean_workspace(fitting_results.lead_mantid_fit.OutputNormalisedCovarianceMatrix) clean_workspace(fitting_results.lead_mantid_fit.OutputParameters) From 715faa5ca3a7756cb5d306ab3b2c944480999d4e Mon Sep 17 00:00:00 2001 From: "Kevin A. 
Tactac" Date: Wed, 18 Dec 2024 13:59:58 -0500 Subject: [PATCH 07/23] fix codecov step --- .github/workflows/test.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index a2d12eab6..ecb6a3996 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -36,9 +36,10 @@ jobs: git submodule update --init python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/integration/ mv .coverage .coverage.d/integration - - name: upload coverage to codecov + - name: combine coverage reports run: | - coverage combine .coverage.d/ + coverage combine .coverage.d/ --output=.coverage.d/merged + - name: upload coverage to codecov uses: codecov/codecov-action@v5 if: github.actor != 'dependabot[bot]' From b719e67db8b04f52d3ddfcfd02c77009255108eb Mon Sep 17 00:00:00 2001 From: "Kevin A. Tactac" Date: Wed, 18 Dec 2024 15:05:38 -0500 Subject: [PATCH 08/23] New test prep stage --- .github/workflows/test.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index ecb6a3996..c53c9b0fa 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -24,16 +24,18 @@ jobs: - name: install additional dependencies run: | echo "installing additional dependencies if cannot be installed from conda" + - name: test preparation + run: | + git submodule update --init + mkdir .coverage.d - name: run unit tests run: | echo "running unit tests" python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/unit/ - mkdir .coverage.d mv .coverage .coverage.d/unit - name: run integration tests run: | echo "running integration tests" - git submodule update --init python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/integration/ mv .coverage .coverage.d/integration - name: combine coverage reports From 8ee3110b65b40cf5b9e731814fc99045db6bd89f Mon Sep 17 00:00:00 2001 From: "Kevin A. Tactac" Date: Wed, 18 Dec 2024 15:14:44 -0500 Subject: [PATCH 09/23] change data submodule url --- .gitmodules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitmodules b/.gitmodules index 980b73347..6b087762b 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,4 +1,4 @@ [submodule "tests/data/drtsans-data"] path = tests/data/drtsans-data - url = ../../infrastructure/test-data/drtsans-data.git + url = https://code.ornl.gov/sns-hfir-scse/infrastructure/test-data/drtsans-data.git branch = main From 672722566dc1e6208955f58e20713337cf248cd0 Mon Sep 17 00:00:00 2001 From: "Kevin A. 
Tactac" Date: Wed, 18 Dec 2024 15:33:29 -0500 Subject: [PATCH 10/23] specify cov files --- .github/workflows/test.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index c53c9b0fa..dd069c678 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -29,15 +29,17 @@ jobs: git submodule update --init mkdir .coverage.d - name: run unit tests + env: + COVERAGE_FILE: .coverage.d/unit run: | echo "running unit tests" python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/unit/ - mv .coverage .coverage.d/unit - name: run integration tests + env: + COVERAGE_FILE: .coverage.d/integration run: | echo "running integration tests" python -m pytest --cov=src --cov-report=xml --cov-report=term-missing tests/integration/ - mv .coverage .coverage.d/integration - name: combine coverage reports run: | coverage combine .coverage.d/ --output=.coverage.d/merged From 889145fd10500c2baa33d56a9fc8df066667014b Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 19 Dec 2024 11:07:31 -0500 Subject: [PATCH 11/23] add some noqa --- pyproject.toml | 2 +- src/drtsans/tof/eqsans/correct_frame.py | 2 +- src/drtsans/wavelength.py | 4 ++-- tests/conftest.py | 17 +++++++---------- tests/unit/drtsans/test_settings.py | 8 ++++---- 5 files changed, 15 insertions(+), 18 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index befeebadb..c0e65a06d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -86,7 +86,7 @@ line-length = 119 [tool.ruff] cache-dir = "/tmp/ruff_cache" line-length = 119 -extend-exclude = ["notebooks"] +extend-exclude = ["notebooks", "tests/examples"] [tool.ruff.lint] # https://beta.ruff.rs/docs/rules/ diff --git a/src/drtsans/tof/eqsans/correct_frame.py b/src/drtsans/tof/eqsans/correct_frame.py index e42029200..fc5adb567 100644 --- a/src/drtsans/tof/eqsans/correct_frame.py +++ b/src/drtsans/tof/eqsans/correct_frame.py @@ -408,7 +408,7 @@ def correct_emission_time(input_workspace): SetInstrumentParameter( Workspace=input_workspace, ParameterName="t0_formula", - Value="incidentEnergy=sqrt(81.80420249996277/incidentEnergy), (incidentEnergy < 2.0) ? 0.5*(1280.5-7448.4*incidentEnergy+16509*incidentEnergy^2-17872*incidentEnergy^3+10445*incidentEnergy^4-3169.3*incidentEnergy^5+392.31*incidentEnergy^6) : 0.5*(231.99+6.4797*incidentEnergy-0.5233*incidentEnergy^2+0.0148*incidentEnergy^3)", + Value="incidentEnergy=sqrt(81.80420249996277/incidentEnergy), (incidentEnergy < 2.0) ? 0.5*(1280.5-7448.4*incidentEnergy+16509*incidentEnergy^2-17872*incidentEnergy^3+10445*incidentEnergy^4-3169.3*incidentEnergy^5+392.31*incidentEnergy^6) : 0.5*(231.99+6.4797*incidentEnergy-0.5233*incidentEnergy^2+0.0148*incidentEnergy^3)", # noqa: E501 ) ModeratorTzero( InputWorkspace=input_workspace, diff --git a/src/drtsans/wavelength.py b/src/drtsans/wavelength.py index ccd322af9..566f21b7a 100644 --- a/src/drtsans/wavelength.py +++ b/src/drtsans/wavelength.py @@ -143,7 +143,7 @@ def __imul__(self, other): as in the intersection between Wband(0, 1) and Wband(1, 2). """ b = self * other - self = b + self = b # noqa: PLW0642 (reassigning self) return self def __eq__(self, other): @@ -288,7 +288,7 @@ def __imul__(self, other): as in the intersection between Wband(0, 1) and Wband(1, 2). 
""" wb = self * other - self = wb + self = wb # noqa: PLW0642 (reassigning self) return self def __str__(self): diff --git a/tests/conftest.py b/tests/conftest.py index c1650849a..c008da084 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -424,18 +424,15 @@ def porasil_slice1m(reference_dir): # Absolute path to benchmark files f = dict( s=pjoin(reference_dir.eqsans, "EQSANS_92164.nxs.h5"), # sample - m=pjoin(reference_dir.eqsans, "2017B_mp/beamstop60_mask_4m.nxs"), # noqa: E501 mask + m=pjoin(reference_dir.eqsans, "2017B_mp/beamstop60_mask_4m.nxs"), # mask dc=pjoin(reference_dir.eqsans, "EQSANS_89157.nxs.h5"), # dark current - se=pjoin( - reference_dir.eqsans, - "Sensitivity_patched_thinPMMA_1o3m_87680_event.nxs", - ), # noqa: E501 - dbc=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 direct_beam_center - dbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 direct beam transmission sample - dbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 direct beam transmission empty + se=pjoin(reference_dir.eqsans, "Sensitivity_patched_thinPMMA_1o3m_87680_event.nxs"), # sensitivity + dbc=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # direct_beam_center + dbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # direct beam transmission sample + dbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # direct beam transmission empty b=pjoin(reference_dir.eqsans, "EQSANS_92163.nxs.h5"), # background - bdbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 background direct beam transmission sample - bdbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 background_direct_beam_transmission_empty + bdbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # background direct beam transmission sample + bdbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # background_direct_beam_transmission_empty ) lds = dict( diff --git a/tests/unit/drtsans/test_settings.py b/tests/unit/drtsans/test_settings.py index 2243f402f..406fc0c08 100644 --- a/tests/unit/drtsans/test_settings.py +++ b/tests/unit/drtsans/test_settings.py @@ -16,10 +16,10 @@ def goo(x): y2 = foo(41) z2 = goo(21) - assert type(y1) == type(y2) - assert type(z1) == type(z2) - assert type(y1) != type(z1) - assert type(y2) != type(z2) + assert type(y1) is type(y2) + assert type(z1) is type(z2) + assert type(y1) is not type(z1) + assert type(y2) is not type(z2) def test_offline(): From ab800ebf14e93942ed1138f8ef58f8bc6e3b3e0a Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 19 Dec 2024 11:25:44 -0500 Subject: [PATCH 12/23] address lint errors --- src/drtsans/auto_wedge.py | 4 ++-- src/drtsans/pixel_calibration.py | 8 ++++---- src/drtsans/sensitivity.py | 2 +- src/drtsans/tof/eqsans/geometry.py | 5 +++-- tests/unit/drtsans/mono/test_spice_xml_parser.py | 4 ++-- 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/src/drtsans/auto_wedge.py b/src/drtsans/auto_wedge.py index cc03457f0..3a12efa31 100644 --- a/src/drtsans/auto_wedge.py +++ b/src/drtsans/auto_wedge.py @@ -244,9 +244,9 @@ def _export_to_h5(iq2d, rings, azimuthal_delta, peak_fit_dict, output_dir): function_data_set[0] = peak_fit_dict[index]["fit_function"] # add peak fitting result - for param_name in func_param_dict: + for param_name, param_value in func_param_dict.items(): # form data set - data_set = np.array(func_param_dict[param_name]) + data_set = np.array(param_value) fit_group.create_dataset(param_name, data=data_set) # close diff --git 
a/src/drtsans/pixel_calibration.py b/src/drtsans/pixel_calibration.py index 53c0ca60a..8807efa5f 100644 --- a/src/drtsans/pixel_calibration.py +++ b/src/drtsans/pixel_calibration.py @@ -1766,14 +1766,14 @@ def as_intensities(input_workspace, component="detector1", views=["positions", " intensities = np.zeros(number_histograms) returned_views = {} - for cal_prop in pixel_props: # 'positions', 'heights', 'widths', 'positions_mantid' - output_workspace = f"{str(input_workspace)}_{cal_prop}" # Workspace containing the property as intensity + for cal_prop_key, cal_prop_val in pixel_props.items(): # 'positions', 'heights', 'widths', 'positions_mantid' + output_workspace = f"{str(input_workspace)}_{cal_prop_key}" # Workspace containing the property as intensity # intensties will be non-zero only for workpace indexes that have associated pixels of interests - intensities[workspace_indexes] = pixel_props[cal_prop] + intensities[workspace_indexes] = cal_prop_val workspace = Integration(InputWorkspace=input_workspace, OutputWorkspace=output_workspace) for index in range(number_histograms): workspace.dataY(index)[:] = intensities[index] - returned_views[cal_prop] = mtd[output_workspace] + returned_views[cal_prop_key] = mtd[output_workspace] return returned_views diff --git a/src/drtsans/sensitivity.py b/src/drtsans/sensitivity.py index 9008f5723..b10359ee3 100644 --- a/src/drtsans/sensitivity.py +++ b/src/drtsans/sensitivity.py @@ -167,7 +167,7 @@ def mask_pixels_with_nan(sensitivity_workspace): ) # mask the "bad" pixels - temp_sensitivity = MaskDetectorsIf( + temp_sensitivity = MaskDetectorsIf( # noqa: F841 InputWorkspace=sensitivity_workspace, Operator="GreaterEqual", Value=BAD_PIXEL, diff --git a/src/drtsans/tof/eqsans/geometry.py b/src/drtsans/tof/eqsans/geometry.py index 5c1625804..634eb522d 100644 --- a/src/drtsans/tof/eqsans/geometry.py +++ b/src/drtsans/tof/eqsans/geometry.py @@ -163,9 +163,10 @@ def source_aperture(other, unit="m"): # Find the appropriate set of slit diameters run_number = int(sample_logs.run_number.value) - for start, end in index_to_diameters: + for key, val in index_to_diameters.items(): + start, end = key if start <= run_number <= end: - index_to_diameter = index_to_diameters[(start, end)] + index_to_diameter = val break # entries vBeamSlit, vBeamSlit2, and vBeamSlit3 contain the slit number, identifying the slit diameter diff --git a/tests/unit/drtsans/mono/test_spice_xml_parser.py b/tests/unit/drtsans/mono/test_spice_xml_parser.py index a8bfa999f..bac65909f 100644 --- a/tests/unit/drtsans/mono/test_spice_xml_parser.py +++ b/tests/unit/drtsans/mono/test_spice_xml_parser.py @@ -64,8 +64,8 @@ def test_get_das_logs(datarepo_dir, clean_workspace): LoadHFIRSANS(Filename=test_xml, OutputWorkspace=clean_workspace("SpiceXMLTest")) spice_ws = mtd["SpiceXMLTest"] - for das_log_name in das_log_values: - log_value, log_unit = das_log_values[das_log_name] + for das_log_name, das_log_value in das_log_values: + log_value, log_unit = das_log_value print(f"{das_log_name}: {log_value}, {log_unit}") if das_log_name in ["sample_detector_distance", "wavelength_spread"]: continue From 33b2c1b4146f9ef022bee1a50bf2f4d278a0ea5f Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 19 Dec 2024 11:55:32 -0500 Subject: [PATCH 13/23] forgot .items() in test spice xml parser --- tests/unit/drtsans/mono/test_spice_xml_parser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/unit/drtsans/mono/test_spice_xml_parser.py b/tests/unit/drtsans/mono/test_spice_xml_parser.py index 
bac65909f..21bc7f310 100644 --- a/tests/unit/drtsans/mono/test_spice_xml_parser.py +++ b/tests/unit/drtsans/mono/test_spice_xml_parser.py @@ -64,7 +64,7 @@ def test_get_das_logs(datarepo_dir, clean_workspace): LoadHFIRSANS(Filename=test_xml, OutputWorkspace=clean_workspace("SpiceXMLTest")) spice_ws = mtd["SpiceXMLTest"] - for das_log_name, das_log_value in das_log_values: + for das_log_name, das_log_value in das_log_values.items(): log_value, log_unit = das_log_value print(f"{das_log_name}: {log_value}, {log_unit}") if das_log_name in ["sample_detector_distance", "wavelength_spread"]: From b35a1524e7a5654869f75ec711ff208753075bdc Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 19 Dec 2024 12:09:03 -0500 Subject: [PATCH 14/23] try pip install in test preparation step --- .github/workflows/test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index dd069c678..491324256 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -26,6 +26,7 @@ jobs: echo "installing additional dependencies if cannot be installed from conda" - name: test preparation run: | + pip install . git submodule update --init mkdir .coverage.d - name: run unit tests From 657f074a9021c09b6e0f40b00cff9535af5a5609 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 6 Feb 2025 13:40:45 -0500 Subject: [PATCH 15/23] restore from next and exclude notebooks & test/examples --- .pre-commit-config.yaml | 6 + .../biosans_midrange_detector_barscan.ipynb | 3 +- ...ixel_calibration_22487_with_midrange.ipynb | 9 +- .../biosans_wing_detector_barscan.ipynb | 3 +- .../biosans_pixel_calibration_5767.ipynb | 9 +- ...pixel_calibration_5767_with_midrange.ipynb | 10 +- .../biosans_pixel_calibration_838.ipynb | 8 +- ...osans_pixel_calibration_838_midrange.ipynb | 9 +- .../gpsans/gpsans_pixel_calibration.ipynb | 4 + .../gpsans_pixel_calibration_9905.ipynb | 3 + .../gpsans/gpsans_reduction_1config.ipynb | 4 +- notebooks/tubewidth/gpsans_tubewidth.ipynb | 5 +- src/drtsans/momentum_transfer.py | 18 ++- src/drtsans/pixel_calibration.py | 8 +- src/drtsans/tof/eqsans/geometry.py | 5 +- src/drtsans/wavelength.py | 4 +- tests/conftest.py | 17 +-- tests/examples/BIOSANS.ipynb | 115 ++++++++---------- tests/examples/EQSANS_porasil.ipynb | 68 +++++------ tests/examples/EQSANS_porasil.py | 3 +- tests/examples/porasil.py | 4 +- 21 files changed, 169 insertions(+), 146 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b8b1bf4c0..2915613e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -24,7 +24,13 @@ repos: hooks: - id: ruff args: [--no-cache, --fix, --exit-non-zero-on-fix] + exclude: | + notebooks/ + tests/examples/ - id: ruff-format + exclude: | + notebooks/ + tests/examples/ - repo: https://github.com/kynan/nbstripout rev: 0.8.1 hooks: diff --git a/notebooks/barscan/biosans_midrange_detector_barscan.ipynb b/notebooks/barscan/biosans_midrange_detector_barscan.ipynb index cd8d16fec..cbd70e1a8 100644 --- a/notebooks/barscan/biosans_midrange_detector_barscan.ipynb +++ b/notebooks/barscan/biosans_midrange_detector_barscan.ipynb @@ -15,10 +15,11 @@ "outputs": [], "source": [ "import os\n", + "import sys\n", "import time\n", "\n", "# Mantid imports\n", - "from mantid.simpleapi import LoadNexusProcessed, mtd\n", + "from mantid.simpleapi import LoadEventNexus, LoadNexusProcessed, SaveNexus, mtd\n", "\n", "# drtsans imports\n", "from drtsans.pixel_calibration import calculate_barscan_calibration, load_calibration, 
as_intensities\n", diff --git a/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb b/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb index 3c5bec081..530f99899 100644 --- a/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb +++ b/notebooks/barscan/biosans_pixel_calibration_22487_with_midrange.ipynb @@ -23,7 +23,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadNexus, SaveNexus\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus, SaveNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, as_intensities, plot_detector\n", "from drtsans.pixel_calibration import Table\n", "\n", @@ -675,7 +675,8 @@ "outputs": [], "source": [ "import os\n", - "from drtsans.mono.biosans import load_calibration\n", + "from mantid.simpleapi import LoadEventNexus\n", + "from drtsans.mono.biosans import load_calibration, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -790,7 +791,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -899,6 +900,7 @@ }, "outputs": [], "source": [ + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width(\n", @@ -994,6 +996,7 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", + "from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/barscan/biosans_wing_detector_barscan.ipynb b/notebooks/barscan/biosans_wing_detector_barscan.ipynb index 15f789e72..48235d1b6 100644 --- a/notebooks/barscan/biosans_wing_detector_barscan.ipynb +++ b/notebooks/barscan/biosans_wing_detector_barscan.ipynb @@ -15,10 +15,11 @@ "outputs": [], "source": [ "import os\n", + "import sys\n", "import time\n", "\n", "# Mantid imports\n", - "from mantid.simpleapi import LoadNexusProcessed\n", + "from mantid.simpleapi import LoadEventNexus, LoadNexusProcessed, SaveNexus\n", "# drtsans imports\n", "from drtsans.pixel_calibration import calculate_barscan_calibration, load_calibration, as_intensities" ] diff --git a/notebooks/biosans/biosans_pixel_calibration_5767.ipynb b/notebooks/biosans/biosans_pixel_calibration_5767.ipynb index a2b775360..6cbb9f0b2 100644 --- a/notebooks/biosans/biosans_pixel_calibration_5767.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_5767.ipynb @@ -21,7 +21,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadNexus, SaveNexus\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus, SaveNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, as_intensities, plot_detector\n", "from drtsans.pixel_calibration import Table\n", "\n", @@ -562,7 +562,8 @@ "outputs": [], "source": [ "import os\n", - "from drtsans.mono.biosans import load_calibration\n", + "from mantid.simpleapi import LoadEventNexus\n", + "from drtsans.mono.biosans import 
load_calibration, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -665,7 +666,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -761,6 +762,7 @@ "metadata": {}, "outputs": [], "source": [ + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width('flood_workspace', component='detector1',\n", @@ -847,6 +849,7 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", + "from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb b/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb index 960d1077c..683d745eb 100644 --- a/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_5767_with_midrange.ipynb @@ -626,7 +626,8 @@ "outputs": [], "source": [ "import os\n", - "from drtsans.mono.biosans import load_calibration\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D\n", + "from drtsans.mono.biosans import load_calibration, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -733,7 +734,7 @@ "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -766,7 +767,8 @@ "outputs": [], "source": [ "from drtsans.mono.biosans.simulated_intensities import clone_component_intensities, insert_midrange_detector\n", - "from mantid.simpleapi import mtd, DeleteWorkspaces, Integration, SaveNexus\n", + "from mantid.api import AnalysisDataService\n", + "from mantid.simpleapi import mtd, DeleteWorkspace, DeleteWorkspaces, Integration, LoadEventAsWorkspace2D, SaveNexus\n", "import os\n", "\n", "flood_file = '/HFIR/CG3/IPTS-24666/nexus/CG3_5904.nxs.h5'\n", @@ -833,6 +835,7 @@ "metadata": {}, "outputs": [], "source": [ + "from drtsans.mono.biosans import calculate_apparent_tube_width\n", "\n", "start_time = time.time()\n", "calibration_main = calculate_apparent_tube_width(\n", @@ -926,6 +929,7 @@ "from mantid.api import mtd\n", "from mantid.simpleapi import CreateWorkspace, LoadNexus\n", "from drtsans.tubecollection import TubeCollection\n", + "from matplotlib import pyplot as plt\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", "#\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_838.ipynb b/notebooks/biosans/biosans_pixel_calibration_838.ipynb index cb2d8a95d..ab7a0a873 100644 --- a/notebooks/biosans/biosans_pixel_calibration_838.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_838.ipynb @@ -20,7 +20,7 
@@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadNexus\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus\n", "from drtsans.mono.biosans import calculate_barscan_calibration, plot_detector\n", "\n", "#\n", @@ -204,7 +204,8 @@ "outputs": [], "source": [ "import os\n", - "from drtsans.mono.biosans import load_calibration\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D\n", + "from drtsans.mono.biosans import load_calibration, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", @@ -345,10 +346,11 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import os\n", "import time\n", "from mantid.simpleapi import LoadNexus\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\" and \"plot_wing_detector\" are used to plot both detectors separately\n", diff --git a/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb b/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb index 4a9882597..b4ae533b1 100644 --- a/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb +++ b/notebooks/biosans/biosans_pixel_calibration_838_midrange.ipynb @@ -20,7 +20,7 @@ "import numpy as np\n", "import os\n", "import time\n", - "from mantid.simpleapi import LoadNexus,LoadNexusProcessed, Integration, mtd\n", + "from mantid.simpleapi import LoadEventAsWorkspace2D, LoadNexus,LoadNexusProcessed, Integration, SaveNexus, mtd\n", "from drtsans.mono.biosans import calculate_barscan_calibration, plot_detector\n", "from drtsans.mono.biosans.simulated_intensities import clone_component_intensities, insert_midrange_detector\n", "#\n", @@ -267,7 +267,8 @@ "outputs": [], "source": [ "import os\n", - "from drtsans.mono.biosans import load_calibration\n", + "from mantid.simpleapi import LoadEventNexus\n", + "from drtsans.mono.biosans import load_calibration, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", @@ -420,9 +421,11 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import os\n", "import time\n", - "from drtsans.mono.biosans import calculate_apparent_tube_width\n", + "from mantid.simpleapi import LoadNexus\n", + "from drtsans.mono.biosans import calculate_apparent_tube_width, plot_detector\n", "\n", "#\n", "# \"plot_main_detector\", \"plot_wing_detector\" and \"plot_midrange_detector\" are used to plot both detectors separately\n", diff --git a/notebooks/gpsans/gpsans_pixel_calibration.ipynb b/notebooks/gpsans/gpsans_pixel_calibration.ipynb index a1073a3cd..5effaca81 100644 --- a/notebooks/gpsans/gpsans_pixel_calibration.ipynb +++ b/notebooks/gpsans/gpsans_pixel_calibration.ipynb @@ -355,6 +355,7 @@ "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", "%matplotlib inline\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -550,6 +551,7 @@ "\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "%matplotlib inline\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, 
axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -666,6 +668,7 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -905,6 +908,7 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] diff --git a/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb b/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb index c581a1449..f2d51475e 100644 --- a/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb +++ b/notebooks/gpsans/gpsans_pixel_calibration_9905.ipynb @@ -558,6 +558,7 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] @@ -685,6 +686,7 @@ "source": [ "import numpy as np\n", "from drtsans.tubecollection import TubeCollection\n", + "from matplotlib import pyplot as plt\n", "\n", "#\n", "# \"plot_histograms\" to create fancy plots of the spectram stored in an input workspace\n", @@ -796,6 +798,7 @@ "#\n", "# \"plot_workspace\" is a utility function, which we will use a couple of times\n", "#\n", + "from drtsans.mono.gpsans import plot_detector\n", "def plot_workspace(input_workspace, axes_mode='tube-pixel'):\n", " return plot_detector(input_workspace, backend='mpl',axes_mode=axes_mode, imshow_kwargs={})" ] diff --git a/notebooks/gpsans/gpsans_reduction_1config.ipynb b/notebooks/gpsans/gpsans_reduction_1config.ipynb index 72c8a6a8d..51bd594c0 100644 --- a/notebooks/gpsans/gpsans_reduction_1config.ipynb +++ b/notebooks/gpsans/gpsans_reduction_1config.ipynb @@ -52,7 +52,7 @@ "from pprint import pprint as pretty_print\n", "import time\n", "from drtsans.mono.gpsans import (load_all_files, reduce_single_configuration, plot_reduction_output,\n", - " reduction_parameters, update_reduction_parameters)" + " reduction_parameters, update_reduction_parameters, validate_reduction_parameters)" ] }, { @@ -194,7 +194,7 @@ "from pprint import pprint as pretty_print\n", "import time\n", "from drtsans.mono.gpsans import (load_all_files, reduce_single_configuration, plot_reduction_output,\n", - " update_reduction_parameters)" + " reduction_parameters, update_reduction_parameters, validate_reduction_parameters)" ] }, { diff --git a/notebooks/tubewidth/gpsans_tubewidth.ipynb b/notebooks/tubewidth/gpsans_tubewidth.ipynb index f079523f9..6cb3fb264 100644 --- a/notebooks/tubewidth/gpsans_tubewidth.ipynb +++ b/notebooks/tubewidth/gpsans_tubewidth.ipynb @@ -14,6 +14,8 @@ "outputs": [], "source": [ "# Standard imports\n", + "import os\n", + "import sys\n", "import time\n", "\n", "# Third-party packages\n", @@ -24,9 +26,10 @@ "# Mantid imports\n", "from mantid.simpleapi import LoadEventNexus, Rebin, CreateWorkspace\n", "from mantid.api import mtd\n", + "from mantid import plots\n", "\n", "# drtsans imports\n", - "from drtsans.pixel_calibration import 
calculate_apparent_tube_width, load_calibration\n", + "from drtsans.pixel_calibration import calculate_apparent_tube_width, load_calibration, as_intensities\n", "from drtsans.plots import plot_detector\n", "from drtsans.tubecollection import TubeCollection" ] diff --git a/src/drtsans/momentum_transfer.py b/src/drtsans/momentum_transfer.py index b0ed1d1a9..99e5acb1e 100644 --- a/src/drtsans/momentum_transfer.py +++ b/src/drtsans/momentum_transfer.py @@ -488,16 +488,14 @@ def pixel_info(input_workspace): number_spectra = ws.getNumberHistograms() info = [ - ( - [np.nan, np.nan, np.nan, False] - if _masked_or_monitor(spectrum_info, i) - else [ - spectrum_info.twoTheta(i), - spectrum_info.azimuthal(i), - spectrum_info.l2(i), - True, - ] - ) + [np.nan, np.nan, np.nan, False] + if _masked_or_monitor(spectrum_info, i) + else [ + spectrum_info.twoTheta(i), + spectrum_info.azimuthal(i), + spectrum_info.l2(i), + True, + ] for i in range(number_spectra) ] info = np.array(info) diff --git a/src/drtsans/pixel_calibration.py b/src/drtsans/pixel_calibration.py index 8807efa5f..429f3c237 100644 --- a/src/drtsans/pixel_calibration.py +++ b/src/drtsans/pixel_calibration.py @@ -1766,14 +1766,14 @@ def as_intensities(input_workspace, component="detector1", views=["positions", " intensities = np.zeros(number_histograms) returned_views = {} - for cal_prop_key, cal_prop_val in pixel_props.items(): # 'positions', 'heights', 'widths', 'positions_mantid' - output_workspace = f"{str(input_workspace)}_{cal_prop_key}" # Workspace containing the property as intensity + for cal_prop, pixel_prop in pixel_props.items(): # 'positions', 'heights', 'widths', 'positions_mantid' + output_workspace = f"{str(input_workspace)}_{cal_prop}" # Workspace containing the property as intensity # intensties will be non-zero only for workpace indexes that have associated pixels of interests - intensities[workspace_indexes] = cal_prop_val + intensities[workspace_indexes] = pixel_prop workspace = Integration(InputWorkspace=input_workspace, OutputWorkspace=output_workspace) for index in range(number_histograms): workspace.dataY(index)[:] = intensities[index] - returned_views[cal_prop_key] = mtd[output_workspace] + returned_views[cal_prop] = mtd[output_workspace] return returned_views diff --git a/src/drtsans/tof/eqsans/geometry.py b/src/drtsans/tof/eqsans/geometry.py index 634eb522d..dc9d1ef5a 100644 --- a/src/drtsans/tof/eqsans/geometry.py +++ b/src/drtsans/tof/eqsans/geometry.py @@ -163,10 +163,9 @@ def source_aperture(other, unit="m"): # Find the appropriate set of slit diameters run_number = int(sample_logs.run_number.value) - for key, val in index_to_diameters.items(): - start, end = key + for start, end in index_to_diameters.items(): if start <= run_number <= end: - index_to_diameter = val + index_to_diameter = index_to_diameters[(start, end)] break # entries vBeamSlit, vBeamSlit2, and vBeamSlit3 contain the slit number, identifying the slit diameter diff --git a/src/drtsans/wavelength.py b/src/drtsans/wavelength.py index 566f21b7a..a7d0eb3fd 100644 --- a/src/drtsans/wavelength.py +++ b/src/drtsans/wavelength.py @@ -143,7 +143,7 @@ def __imul__(self, other): as in the intersection between Wband(0, 1) and Wband(1, 2). """ b = self * other - self = b # noqa: PLW0642 (reassigning self) + self = b # noqa: PLW0642 return self def __eq__(self, other): @@ -288,7 +288,7 @@ def __imul__(self, other): as in the intersection between Wband(0, 1) and Wband(1, 2). 
""" wb = self * other - self = wb # noqa: PLW0642 (reassigning self) + self = wb # noqa: PLW0642 return self def __str__(self): diff --git a/tests/conftest.py b/tests/conftest.py index c008da084..c1650849a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -424,15 +424,18 @@ def porasil_slice1m(reference_dir): # Absolute path to benchmark files f = dict( s=pjoin(reference_dir.eqsans, "EQSANS_92164.nxs.h5"), # sample - m=pjoin(reference_dir.eqsans, "2017B_mp/beamstop60_mask_4m.nxs"), # mask + m=pjoin(reference_dir.eqsans, "2017B_mp/beamstop60_mask_4m.nxs"), # noqa: E501 mask dc=pjoin(reference_dir.eqsans, "EQSANS_89157.nxs.h5"), # dark current - se=pjoin(reference_dir.eqsans, "Sensitivity_patched_thinPMMA_1o3m_87680_event.nxs"), # sensitivity - dbc=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # direct_beam_center - dbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # direct beam transmission sample - dbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # direct beam transmission empty + se=pjoin( + reference_dir.eqsans, + "Sensitivity_patched_thinPMMA_1o3m_87680_event.nxs", + ), # noqa: E501 + dbc=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 direct_beam_center + dbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 direct beam transmission sample + dbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 direct beam transmission empty b=pjoin(reference_dir.eqsans, "EQSANS_92163.nxs.h5"), # background - bdbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # background direct beam transmission sample - bdbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # background_direct_beam_transmission_empty + bdbts=pjoin(reference_dir.eqsans, "EQSANS_92161.nxs.h5"), # noqa: E501 background direct beam transmission sample + bdbte=pjoin(reference_dir.eqsans, "EQSANS_92160.nxs.h5"), # noqa: E501 background_direct_beam_transmission_empty ) lds = dict( diff --git a/tests/examples/BIOSANS.ipynb b/tests/examples/BIOSANS.ipynb index acc9ab2ad..7ed642705 100644 --- a/tests/examples/BIOSANS.ipynb +++ b/tests/examples/BIOSANS.ipynb @@ -13,15 +13,15 @@ "metadata": {}, "outputs": [], "source": [ + "import os\n", "import numpy as np\n", + "import scipy.stats\n", "import matplotlib.pyplot as plt\n", - "\n", "%matplotlib notebook\n", "\n", "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\", module=\"numpy\")\n", - "warnings.filterwarnings(\"ignore\")" + "warnings.filterwarnings('ignore', module='numpy')\n", + "warnings.filterwarnings('ignore')" ] }, { @@ -30,8 +30,9 @@ "metadata": {}, "outputs": [], "source": [ + "from mantid.simpleapi import mtd\n", "from mantid import simpleapi as api\n", - "# from reduction_workflow.instruments.sans.sns_command_interface import *" + "#from reduction_workflow.instruments.sans.sns_command_interface import *" ] }, { @@ -62,13 +63,12 @@ "metadata": {}, "outputs": [], "source": [ - "config = dict(\n", - " sample_offset=0, # 340\n", - " detector_offset=0,\n", - " dark_current=\"/HFIR/CG3/IPTS-23782/nexus/CG3_795.nxs.h5\",\n", - " flood=\"/HFIR/CG3/IPTS-23782/nexus/CG3_821.nxs.h5\",\n", - " prepared_flood=\"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\",\n", - ")\n", + "config = dict(sample_offset=0, # 340\n", + " detector_offset=0,\n", + " dark_current=\"/HFIR/CG3/IPTS-23782/nexus/CG3_795.nxs.h5\",\n", + " flood=\"/HFIR/CG3/IPTS-23782/nexus/CG3_821.nxs.h5\",\n", + " prepared_flood = '/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5',\n", + " )\n", "\n", "mask_file = 
\"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_104088_mask.xml\"\n", "mask_nxs = \"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_104088_mask.nxs\"" @@ -83,16 +83,14 @@ "def load_data(filename, mask_wing=True, center_x=None, center_y=None, center_y_wing=None, output_workspace=None):\n", " ws = api.LoadEventNexus(Filename=filename, OutputWorkspace=output_workspace)\n", " ws = api.HFIRSANS2Wavelength(ws, OutputWorkspace=output_workspace)\n", - " api.LoadInstrument(\n", - " Workspace=ws,\n", - " Filename=\"/SNS/users/3y9/Desktop/BIOSANS-commisioning/BIOSANS_Definition_2019_2100.xml\",\n", - " RewriteSpectraMap=\"True\",\n", - " )\n", + " api.LoadInstrument(Workspace=ws,\n", + " Filename='/SNS/users/3y9/Desktop/BIOSANS-commisioning/BIOSANS_Definition_2019_2100.xml',\n", + " RewriteSpectraMap='True')\n", "\n", " # Mask wing\n", " if mask_wing:\n", - " api.MaskDetectors(ws, ComponentList=\"wing_detector\")\n", - "\n", + " api.MaskDetectors(ws, ComponentList='wing_detector')\n", + " \n", " if center_x is not None and center_y is not None and center_y_wing is not None:\n", " biosans.center_detector(ws, center_x=center_x, center_y=center_y, center_y_wing=center_y_wing)\n", "\n", @@ -105,7 +103,7 @@ "metadata": {}, "outputs": [], "source": [ - "# ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\")\n", + "#ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\")\n", "def plot_det(ws):\n", " nx = 192\n", " ny = 256\n", @@ -121,12 +119,12 @@ "\n", " print(data.shape)\n", "\n", - " counts = data.reshape((nx, ny, len(wl)))\n", - " d_counts = err.reshape((nx, ny, len(wl)))\n", + " counts = data.reshape((nx,ny,len(wl)))\n", + " d_counts = err.reshape((nx,ny,len(wl)))\n", " _counts = np.sum(counts, axis=2)\n", "\n", " print(counts.shape)\n", - " print(counts[:, :, 0].shape)\n", + " print(counts[:,:,0].shape)\n", " plt.figure()\n", " plt.pcolor(_counts.T)" ] @@ -146,20 +144,20 @@ "source": [ "# The new way of dealing with the beam center is in real space, relative to the center of the detector.\n", "# The EQSANS detector is 192 x 256 pixels, and the pixel sizes are 5.5 mm x 4.3 mm\n", - "x_center, y_center = [-0.01230061, -0.0351677]\n", + "x_center, y_center = [-0.01230061, -0.0351677 ]\n", "beam_center_drt = (-0.012300612863652916, -0.035167700781957245, -0.034614155228279528)\n", "\n", "print(\"Old reduction's beam center in real space: %g %g\" % (x_center, y_center))\n", "\n", - "db_ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_815.nxs.h5\", output_workspace=\"beam\")\n", + "db_ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_815.nxs.h5\", output_workspace='beam')\n", "\n", "center = biosans.find_beam_center(db_ws)\n", "\n", "print(\"Beam center found: %g %g %g\" % (center[0], center[1], center[2]))\n", "\n", - "config[\"center_x\"] = center[0]\n", - "config[\"center_y\"] = center[1]\n", - "config[\"center_y_wing\"] = center[2]" + "config['center_x'] = center[0]\n", + "config['center_y'] = center[1]\n", + "config['center_y_wing'] = center[2]" ] }, { @@ -175,17 +173,15 @@ "metadata": {}, "outputs": [], "source": [ - "flood_ws = load_data(\n", - " config[\"flood\"],\n", - " center_x=config[\"center_x\"],\n", - " center_y=config[\"center_y\"],\n", - " center_y_wing=config[\"center_y_wing\"],\n", - " output_workspace=\"CG3_flood\",\n", - ")\n", - "drtsans.calculate_sensitivity_correction(\n", - " flood_ws, filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\", output_workspace=\"CG3_sensitivity\"\n", - ")\n", - "config[\"prepared_flood\"] = 
\"/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5\"" + "flood_ws = load_data(config['flood'],\n", + " center_x=config['center_x'],\n", + " center_y=config['center_y'],\n", + " center_y_wing=config['center_y_wing'],\n", + " output_workspace='CG3_flood')\n", + "drtsans.calculate_sensitivity_correction(flood_ws, \n", + " filename='/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5',\n", + " output_workspace='CG3_sensitivity')\n", + "config['prepared_flood'] = '/HFIR/CG3/IPTS-23782/shared/CG3_flood_821.h5'" ] }, { @@ -202,39 +198,35 @@ "outputs": [], "source": [ "%%time\n", - "ws = load_data(\n", - " \"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\",\n", - " center_x=config[\"center_x\"],\n", - " center_y=config[\"center_y\"],\n", - " center_y_wing=config[\"center_y_wing\"],\n", - " output_workspace=\"CG3_957\",\n", - ")\n", + "ws = load_data(\"/HFIR/CG3/IPTS-23782/nexus/CG3_957.nxs.h5\",\n", + " center_x=config['center_x'],\n", + " center_y=config['center_y'],\n", + " center_y_wing=config['center_y_wing'],\n", + " output_workspace='CG3_957')\n", "\n", "# Dark current\n", - "dark_ws = load_data(\n", - " config[\"dark_current\"],\n", - " center_x=config[\"center_x\"],\n", - " center_y=config[\"center_y\"],\n", - " center_y_wing=config[\"center_y_wing\"],\n", - " output_workspace=\"CG3_dark\",\n", - ")\n", + "dark_ws = load_data(config['dark_current'],\n", + " center_x=config['center_x'],\n", + " center_y=config['center_y'],\n", + " center_y_wing=config['center_y_wing'],\n", + " output_workspace='CG3_dark')\n", "biosans.subtract_dark_current(ws, dark_ws)\n", "\n", "# Normalization\n", "biosans.normalize_by_time(ws)\n", "\n", "# Solid angle\n", - "# biosans.solid_angle_correction(ws)\n", + "#biosans.solid_angle_correction(ws)\n", "\n", "# Sensitivity\n", - "drtsans.apply_sensitivity_correction(ws, sensitivity_filename=config[\"prepared_flood\"])\n", + "drtsans.apply_sensitivity_correction(ws, sensitivity_filename=config['prepared_flood'])\n", "\n", "# Transmission\n", "\n", "# Background\n", "\n", "# Save output\n", - "# api.SaveNexus(InputWorkspace=ws, Filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_reduced_957.hf5\")" + "#api.SaveNexus(InputWorkspace=ws, Filename=\"/HFIR/CG3/IPTS-23782/shared/CG3_reduced_957.hf5\")" ] }, { @@ -243,17 +235,16 @@ "metadata": {}, "outputs": [], "source": [ - "BinningParams = namedtuple(\"BinningParams\", \"min max bins\")\n", + "BinningParams = namedtuple('BinningParams', 'min max bins')\n", "\n", - "q_data = drtsans.convert_to_q(ws, mode=\"scalar\")\n", + "q_data = drtsans.convert_to_q(ws, mode='scalar')\n", "\n", - "iq_output = biosans.bin_intensity_into_q1d(\n", - " q_data, bin_params=BinningParams(min=0.001, max=0.25, bins=200), linear_binning=False\n", - ") # , bin_method=1)\n", + "iq_output = biosans.bin_intensity_into_q1d(q_data, bin_params=BinningParams(min=0.001, max=0.25, bins=200),\n", + " linear_binning=False)#, bin_method=1)\n", "\n", "print(iq_output.wavelength)\n", "fig, ax = plt.subplots()\n", - "ax.errorbar(iq_output.mod_q, iq_output.intensity, yerr=iq_output.error, label=\"AgBeh\")" + "ax.errorbar(iq_output.mod_q, iq_output.intensity, yerr=iq_output.error, label=\"AgBeh\")\n" ] }, { diff --git a/tests/examples/EQSANS_porasil.ipynb b/tests/examples/EQSANS_porasil.ipynb index 52cd01769..e9f0f425d 100644 --- a/tests/examples/EQSANS_porasil.ipynb +++ b/tests/examples/EQSANS_porasil.ipynb @@ -15,13 +15,12 @@ "source": [ "import os\n", "import numpy as np\n", + "import scipy.stats\n", "import warnings\n", - "\n", - "warnings.filterwarnings(\"ignore\", 
module=\"numpy\")\n", - "warnings.filterwarnings(\"ignore\")\n", + "warnings.filterwarnings('ignore', module='numpy')\n", + "warnings.filterwarnings('ignore')\n", "\n", "import matplotlib.pyplot as plt\n", - "\n", "%matplotlib notebook" ] }, @@ -31,6 +30,7 @@ "metadata": {}, "outputs": [], "source": [ + "from mantid.simpleapi import mtd\n", "from mantid import simpleapi as api" ] }, @@ -56,17 +56,17 @@ "metadata": {}, "outputs": [], "source": [ - "config = dict( # mask=\"data/EQSANS_88980_mask.xml\",\n", - " bin_width=0.5,\n", - " low_tof_clip=500,\n", - " high_tof_clip=2000,\n", - " detector_offset=0,\n", - " sample_offset=340,\n", - " flux_method=\"proton charge\",\n", - " flux=\"/SNS/EQSANS/shared/instrument_configuration/bl6_flux_at_sample\",\n", - " sensitivity_file_path=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs\",\n", - " dark_current=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/EQSANS_86275.nxs.h5\",\n", - ")\n", + "config = dict(#mask=\"data/EQSANS_88980_mask.xml\",\n", + " bin_width=0.5,\n", + " low_tof_clip=500,\n", + " high_tof_clip=2000,\n", + " detector_offset=0,\n", + " sample_offset=340,\n", + " flux_method='proton charge',\n", + " flux=\"/SNS/EQSANS/shared/instrument_configuration/bl6_flux_at_sample\",\n", + " sensitivity_file_path=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017A_mp/Sensitivity_patched_thinPMMA_4m_79165_event.nxs\",\n", + " dark_current=\"/SNS/EQSANS/shared/NeXusFiles/EQSANS/2017B_mp/EQSANS_86275.nxs.h5\",\n", + " )\n", "\n", "mask_file = \"/SNS/users/m2d/git/analysis_playground/eqsans/notebooks/data/EQSANS_88980_mask.xml\"" ] @@ -87,16 +87,16 @@ "source": [ "# The new way of dealing with the beam center is in real space, relative to the center of the detector.\n", "# The EQSANS detector is 192 x 256 pixels, and the pixel sizes are 5.5 mm x 4.3 mm\n", - "x_center = -(192 / 2.0 - 90.93) * 0.0055\n", - "y_center = (256 / 2.0 - 131.47) * 0.0043\n", + "x_center = -(192/2.0 - 90.93) * 0.0055\n", + "y_center = (256/2.0 - 131.47) * 0.0043\n", "print(\"Old reduction's beam center in real space: %g %g\" % (x_center, y_center))\n", "\n", "db_ws = eqsans.load_events(\"EQSANS_88973\")\n", "center = eqsans.center_detector(db_ws)\n", "print(\"Beam center found: %g %g\" % (center[0], center[1]))\n", "\n", - "config[\"x_center\"] = x_center\n", - "config[\"y_center\"] = y_center" + "config['x_center'] = x_center\n", + "config['y_center'] = y_center" ] }, { @@ -124,7 +124,7 @@ "if apply_transmission:\n", " ws_tr_sample = eqsans.prepare_data(\"EQSANS_88975\", **config)\n", " ws_tr_direct = eqsans.prepare_data(\"EQSANS_88973\", **config)\n", - " tr_ws = eqsans.calculate_transmission(ws_tr_sample, ws_tr_direct, radius=None, radius_unit=\"mm\")\n", + " tr_ws = eqsans.calculate_transmission(ws_tr_sample, ws_tr_direct, radius=None, radius_unit='mm')\n", " ws = eqsans.apply_transmission_correction(ws, trans_workspace=tr_ws)\n", "\n", "# Background\n", @@ -135,7 +135,7 @@ " ws_tr_back = eqsans.prepare_data(\"EQSANS_88974\", **config)\n", " ws_tr_direct = eqsans.prepare_data(\"EQSANS_88973\", **config)\n", "\n", - " tr_ws = eqsans.calculate_transmission(ws_tr_back, ws_tr_direct, radius=None, radius_unit=\"mm\")\n", + " tr_ws = eqsans.calculate_transmission(ws_tr_back, ws_tr_direct, radius=None, radius_unit='mm')\n", " ws_bck = eqsans.apply_transmission_correction(ws_bck, trans_workspace=tr_ws)\n", "\n", "ws = eqsans.subtract_background(ws, background=ws_bck)\n", @@ -151,7 +151,7 @@ "outputs": [], "source": [ "%%time\n", - 
"table_ws_list = eqsans.prepare_momentum_transfer(ws, wavelength_binning=[config[\"bin_width\"]])" + "table_ws_list = eqsans.prepare_momentum_transfer(ws, wavelength_binning=[config['bin_width']])" ] }, { @@ -164,7 +164,7 @@ "iq_ws_f1 = eqsans.cal_iq(table_ws_list[0], bins=100, log_binning=True)\n", "\n", "# Save output\n", - "filepath = os.path.join(os.path.expanduser(\"~\"), iq_ws_f1.name() + \".txt\")\n", + "filepath = os.path.join(os.path.expanduser('~'), iq_ws_f1.name()+'.txt')\n", "api.SaveAscii(iq_ws_f1.name(), Filename=filepath, WriteSpectrumID=False, WriteXError=True)" ] }, @@ -178,7 +178,7 @@ "if len(table_ws_list) == 2:\n", " iq_ws_f2 = eqsans.cal_iq(table_ws_list[1], bins=150, log_binning=True)\n", "\n", - " filepath = os.path.join(os.path.expanduser(\"~\"), iq_ws_f2.name() + \".txt\")\n", + " filepath = os.path.join(os.path.expanduser('~'), iq_ws_f2.name()+'.txt')\n", " api.SaveAscii(iq_ws_f2.name(), Filename=filepath, WriteSpectrumID=False, WriteXError=True)" ] }, @@ -200,24 +200,24 @@ "source": [ "scale_match = 300000\n", "\n", - "data_dir = os.path.expanduser(\"~\")\n", - "reduced_f1 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame1_iq.txt\"), delimiter=\",\")\n", - "reduced_f2 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame2_iq.txt\"), delimiter=\",\")\n", + "data_dir = os.path.expanduser('~')\n", + "reduced_f1 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame1_iq.txt'), delimiter=',')\n", + "reduced_f2 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame2_iq.txt'), delimiter=',')\n", "\n", "fig, ax = plt.subplots()\n", - "ax.errorbar(reduced_f1.T[0], reduced_f1.T[1] / scale_match, yerr=reduced_f1.T[2] / scale_match, label=\"New Frame #1\")\n", - "ax.errorbar(reduced_f2.T[0], reduced_f2.T[1] / scale_match, yerr=reduced_f2.T[2] / scale_match, label=\"New Frame #2\")\n", + "ax.errorbar(reduced_f1.T[0], reduced_f1.T[1]/scale_match, yerr=reduced_f1.T[2]/scale_match, label=\"New Frame #1\")\n", + "ax.errorbar(reduced_f2.T[0], reduced_f2.T[1]/scale_match, yerr=reduced_f2.T[2]/scale_match, label=\"New Frame #2\")\n", "\n", "# To compare to reference data, execute the EQSANS_porasil.py script on one of the\n", "# analysis computers to create the reference data.\n", - "if os.path.exists(os.path.join(data_dir, \"EQSANS_88980_frame1_iq.txt\")):\n", - " ref_f1 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame1_iq_ref.txt\"), delimiter=\",\")\n", - " ref_f2 = np.loadtxt(os.path.join(data_dir, \"EQSANS_88980_frame2_iq_ref.txt\"), delimiter=\",\")\n", + "if os.path.exists(os.path.join(data_dir, 'EQSANS_88980_frame1_iq.txt')):\n", + " ref_f1 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame1_iq_ref.txt'), delimiter=',')\n", + " ref_f2 = np.loadtxt(os.path.join(data_dir, 'EQSANS_88980_frame2_iq_ref.txt'), delimiter=',')\n", " ax.errorbar(ref_f1.T[0], ref_f1.T[1], yerr=ref_f1.T[2], label=\"Old Frame #1\")\n", " ax.errorbar(ref_f2.T[0], ref_f2.T[1], yerr=ref_f2.T[2], label=\"Old Frame #2\")\n", "\n", - "ax.set_yscale(\"log\")\n", - "ax.set_xscale(\"log\")\n", + "ax.set_yscale('log')\n", + "ax.set_xscale('log')\n", "ax.legend()" ] } diff --git a/tests/examples/EQSANS_porasil.py b/tests/examples/EQSANS_porasil.py index d92719c83..ad88f8f7b 100644 --- a/tests/examples/EQSANS_porasil.py +++ b/tests/examples/EQSANS_porasil.py @@ -1,7 +1,6 @@ """ -EQSANS example for the legacy reduction + EQSANS example for the legacy reduction """ - # flake8: noqa import os diff --git a/tests/examples/porasil.py b/tests/examples/porasil.py index 2baf20e44..61f27edcd 100644 
--- a/tests/examples/porasil.py +++ b/tests/examples/porasil.py @@ -1,7 +1,7 @@ # flake8: noqa """ -The following is a real-life example of an EQSANS reduction script. -It uses the current Mantid reduction for EQSANS. + The following is a real-life example of an EQSANS reduction script. + It uses the current Mantid reduction for EQSANS. """ # EQSANS reduction script # Script automatically generated on Fri Mar 3 12:00:50 2017 From f8a43e951ae82b784451e73b11999581959417fb Mon Sep 17 00:00:00 2001 From: glass-ships Date: Thu, 6 Feb 2025 14:18:34 -0500 Subject: [PATCH 16/23] fix geometry.py --- src/drtsans/tof/eqsans/geometry.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/drtsans/tof/eqsans/geometry.py b/src/drtsans/tof/eqsans/geometry.py index dc9d1ef5a..cb7992696 100644 --- a/src/drtsans/tof/eqsans/geometry.py +++ b/src/drtsans/tof/eqsans/geometry.py @@ -163,9 +163,10 @@ def source_aperture(other, unit="m"): # Find the appropriate set of slit diameters run_number = int(sample_logs.run_number.value) - for start, end in index_to_diameters.items(): - if start <= run_number <= end: - index_to_diameter = index_to_diameters[(start, end)] + for (start_run_number, end_run_number), slits in index_to_diameters.items(): + print(f"start_run_number, end_run_number: {start_run_number}, {end_run_number}") + if start_run_number <= run_number <= end_run_number: + index_to_diameter = slits break # entries vBeamSlit, vBeamSlit2, and vBeamSlit3 contain the slit number, identifying the slit diameter From 2b965e4ce86544633fa0715e3a886535344cc67b Mon Sep 17 00:00:00 2001 From: Glass Date: Mon, 10 Feb 2025 12:56:42 -0500 Subject: [PATCH 17/23] Update .pre-commit-config.yaml --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2915613e3..fef09f631 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -20,7 +20,7 @@ repos: - id: end-of-file-fixer - id: sort-simple-yaml - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.3 + rev: v0.9.4 hooks: - id: ruff args: [--no-cache, --fix, --exit-non-zero-on-fix] From f7e58306a3614b2c97f161286d950f78a6d8445b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 17:56:49 +0000 Subject: [PATCH 18/23] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../gpsans_spice_pixel_map_template.py | 2 +- src/drtsans/auto_wedge.py | 5 +- src/drtsans/dataobjects.py | 4 +- src/drtsans/detector.py | 4 +- src/drtsans/determine_bins.py | 6 +- src/drtsans/files/hdf5_rw.py | 10 +-- src/drtsans/files/log_h5_reader.py | 4 +- src/drtsans/geometry.py | 6 +- src/drtsans/iq.py | 4 +- src/drtsans/momentum_transfer.py | 2 +- src/drtsans/mono/biosans/api.py | 6 +- .../mono/biosans/cg3_spice_to_nexus.py | 4 +- src/drtsans/mono/convert_xml_to_nexus.py | 4 +- src/drtsans/mono/gpsans/api.py | 12 ++-- src/drtsans/mono/gpsans/cg2_spice_to_nexus.py | 2 +- src/drtsans/mono/load.py | 8 +-- src/drtsans/mono/meta_data.py | 26 +++----- src/drtsans/mono/spice_data.py | 4 +- src/drtsans/path.py | 2 +- src/drtsans/pixel_calibration.py | 4 +- src/drtsans/prepare_sensivities_correction.py | 5 +- src/drtsans/redparams.py | 6 +- src/drtsans/reductionlog.py | 6 +- src/drtsans/resolution.py | 2 +- src/drtsans/samplelogs.py | 8 +-- ...sensitivity_correction_moving_detectors.py | 2 +- src/drtsans/sensitivity_correction_patch.py | 2 +- 
src/drtsans/tof/eqsans/api.py | 6 +- src/drtsans/tof/eqsans/correction_api.py | 6 +- .../eqsans/elastic_reference_normalization.py | 2 +- .../tof/eqsans/incoherence_correction_1d.py | 6 +- src/drtsans/tof/eqsans/meta_data.py | 2 +- src/drtsans/tof/eqsans/reduction_api.py | 6 +- tests/conftest.py | 2 +- .../mono/biosans/test_create_event_nexus.py | 2 +- .../drtsans/mono/biosans/test_load.py | 36 +++++------ .../test_overwrite_geometry_meta_data.py | 2 +- .../test_prepare_sensitivities_spice.py | 2 +- .../mono/biosans/test_reduction_spice.py | 2 +- .../mono/gpsans/test_find_beam_center.py | 2 +- .../drtsans/mono/gpsans/test_load.py | 24 +++---- .../mono/gpsans/test_pixel_map_spice.py | 6 +- tests/integration/drtsans/mono/test_load.py | 14 ++--- tests/integration/drtsans/test_auto_wedge.py | 12 ++-- .../drtsans/tof/eqsans/test_reduction_api.py | 24 ++++--- .../unit/drtsans/i_of_q_binning_tests_data.py | 2 +- tests/unit/drtsans/test_event_nexus_nodes.py | 2 +- tests/unit/drtsans/test_i_of_q_1d_binning.py | 40 ++++++------ tests/unit/drtsans/test_i_of_q_2d_binning.py | 62 +++++++++---------- .../drtsans/test_i_of_q_annular_binning.py | 2 +- .../unit/drtsans/test_i_of_q_wedge_binning.py | 2 +- tests/unit/drtsans/test_plotting.py | 4 +- tests/unit/drtsans/test_savereductionlog.py | 4 +- 53 files changed, 200 insertions(+), 224 deletions(-) diff --git a/scripts/jupyter_notebooks/gpsans_spice_pixel_map_template.py b/scripts/jupyter_notebooks/gpsans_spice_pixel_map_template.py index f675567f1..840791d46 100644 --- a/scripts/jupyter_notebooks/gpsans_spice_pixel_map_template.py +++ b/scripts/jupyter_notebooks/gpsans_spice_pixel_map_template.py @@ -98,7 +98,7 @@ def show_calibration_stage1(raw_flood_ws_name, database_file): # Plot flood workspace raw and calibrated print("#####\n\nCompare applying the calibration to flood (stage 1)") - calibrated_flood_ws_name = f'demo_calibrated1_flood_{raw_flood_ws_name.split("flood_")[1]}' + calibrated_flood_ws_name = f"demo_calibrated1_flood_{raw_flood_ws_name.split('flood_')[1]}" apply_calibrations( raw_flood_ws_name, output_workspace=calibrated_flood_ws_name, diff --git a/src/drtsans/auto_wedge.py b/src/drtsans/auto_wedge.py index 3a12efa31..87e40cac1 100644 --- a/src/drtsans/auto_wedge.py +++ b/src/drtsans/auto_wedge.py @@ -501,8 +501,7 @@ def _estimatePeakParameters(intensity, azimuthal, azimuthal_start, window_half_w break # output print( - f"[WEDGE FIT] azimuthal: {azimuthal_new}, {azimuthal_last} with " - f"left and right as {left_index}, {right_index}" + f"[WEDGE FIT] azimuthal: {azimuthal_new}, {azimuthal_last} with left and right as {left_index}, {right_index}" ) # now use the first two moments of the data within the window to give an improved center position (first moment) @@ -837,7 +836,7 @@ def _fitQAndAzimuthal( fit_result_dict[index]["error"] = error_reason continue else: - fitted_peaks_message += f"spectrum {index-1}: Fitted peaks: {newlyFittedPeaks}\n" + fitted_peaks_message += f"spectrum {index - 1}: Fitted peaks: {newlyFittedPeaks}\n" for i in range(len(peakResults)): peakResults[i].append(newlyFittedPeaks[i]) q_centers_used.append(q_center) diff --git a/src/drtsans/dataobjects.py b/src/drtsans/dataobjects.py index 368d95de4..16d37e3cf 100644 --- a/src/drtsans/dataobjects.py +++ b/src/drtsans/dataobjects.py @@ -619,10 +619,10 @@ def __new__(cls, intensity, error, qx, qy, delta_qx=None, delta_qy=None, wavelen # Sanity check assert qx.shape == intensity.shape, ( - f"qx and intensity must have same shapes. 
" f"It is not now: {qx.shape} vs {intensity.shape}" + f"qx and intensity must have same shapes. It is not now: {qx.shape} vs {intensity.shape}" ) assert qy.shape == intensity.shape, ( - f"qy and intensity must have same shapes. " f"It is not now: {qy.shape} vs {intensity.shape}" + f"qy and intensity must have same shapes. It is not now: {qy.shape} vs {intensity.shape}" ) # pass everything to namedtuple diff --git a/src/drtsans/detector.py b/src/drtsans/detector.py index 546d8f56d..8aa57d4b6 100644 --- a/src/drtsans/detector.py +++ b/src/drtsans/detector.py @@ -228,7 +228,7 @@ def _detector_first_ws_index(self, first_det_id): self.first_index = ws_index break else: - raise ValueError("Iterared WS and did not find first det id = " "{}".format(first_det_id)) + raise ValueError("Iterared WS and did not find first det id = {}".format(first_det_id)) def masked_ws_indices(self): """ @@ -256,7 +256,7 @@ def monitor_indices(self): return np.array([]) def __str__(self): - return "Component: {} with {} pixels (dim x={}, dim y={})." " First index = {}.".format( + return "Component: {} with {} pixels (dim x={}, dim y={}). First index = {}.".format( self._component_name, self.dims, self.dim_x, diff --git a/src/drtsans/determine_bins.py b/src/drtsans/determine_bins.py index 04af5b39d..172da8e21 100644 --- a/src/drtsans/determine_bins.py +++ b/src/drtsans/determine_bins.py @@ -31,7 +31,7 @@ def determine_1d_linear_bins(x_min, x_max, bins): # Check input x min and x max if x_min is None or x_max is None or x_min >= x_max: raise RuntimeError( - "x min {} and x max {} must not be None and x min shall be less than x max" "".format(x_min, x_max) + "x min {} and x max {} must not be None and x min shall be less than x max".format(x_min, x_max) ) # force the number of bins to be an integer and error check it bins = int(bins) @@ -104,9 +104,7 @@ def determine_1d_log_bins(x_min, x_max, decade_on_center, n_bins_per_decade=None # case that is not supported if decade_on_center: - assert n_bins_per_decade is not None, ( - "For option decade_on_center, number of bins per decade " "is required" - ) + assert n_bins_per_decade is not None, "For option decade_on_center, number of bins per decade is required" x_ref = x_min # calculate bin step size diff --git a/src/drtsans/files/hdf5_rw.py b/src/drtsans/files/hdf5_rw.py index 2695f9613..41f9be2b7 100644 --- a/src/drtsans/files/hdf5_rw.py +++ b/src/drtsans/files/hdf5_rw.py @@ -79,7 +79,7 @@ def match(self, other_node): # compare class type if not isinstance(other_node, type(self)): raise TypeError( - "Try to match instance of class {} (other) to {} (self)" "".format(type(other_node), type(self)) + "Try to match instance of class {} (other) to {} (self)".format(type(other_node), type(self)) ) # compare name @@ -89,14 +89,14 @@ def match(self, other_node): # compare attributes if set(self._attributes.keys()) != set(other_node.attributes.keys()): print( - "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" "".format( + "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}".format( self.name, set(self._attributes.keys()) - set(other_node.attributes.keys()), set(other_node.attributes.keys()) - set(self._attributes.keys()), ) ) raise KeyError( - "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}" "".format( + "Data node {} Attributes are not same:\nself - other = {}]\nother - self = {}".format( self.name, set(self._attributes.keys()) - set(other_node.attributes.keys()), set(other_node.attributes.keys()) 
- set(self._attributes.keys()), @@ -107,7 +107,7 @@ def match(self, other_node): error_msg = "" for attr_name in self._attributes.keys(): if self._attributes[attr_name] != other_node.attributes[attr_name]: - error_msg += "Mismatch attribute {} value: self = {}, other = {}" "".format( + error_msg += "Mismatch attribute {} value: self = {}, other = {}".format( attr_name, self._attributes[attr_name], other_node.attributes[attr_name], @@ -186,7 +186,7 @@ def write_attributes(self, curr_entry): except TypeError as type_error: print(f"[ERROR] {self._name}-node attribute {attr_name} is of type {type(attr_name)}") raise TypeError( - f"[ERROR] {self._name}-node attribute {attr_name} is of type " f"{type(attr_name)}: {type_error}" + f"[ERROR] {self._name}-node attribute {attr_name} is of type {type(attr_name)}: {type_error}" ) diff --git a/src/drtsans/files/log_h5_reader.py b/src/drtsans/files/log_h5_reader.py index a95e6a890..44e39a213 100644 --- a/src/drtsans/files/log_h5_reader.py +++ b/src/drtsans/files/log_h5_reader.py @@ -76,7 +76,7 @@ def compare_reduced_iq(test_log_file, gold_log_file, title: str, prefix: str): # Output error message if test_exception: - base_name = f'{prefix}{os.path.basename(test_log_file).split(".")[0]}' + base_name = f"{prefix}{os.path.basename(test_log_file).split('.')[0]}" report_difference( (test_q_vec, test_intensity_vec), (gold_q_vec, gold_intensity_vec), @@ -211,7 +211,7 @@ def verify_cg2_reduction_results(sample_names, output_dir, gold_path, title, pre try: compare_reduced_iq(output_log_file, gold_log_file, title_i, prefix) except AssertionError as unmatched_error: - unmatched_errors = "Testing output {} is different from gold result {}:\n{}" "".format( + unmatched_errors = "Testing output {} is different from gold result {}:\n{}".format( output_log_file, gold_log_file, unmatched_error ) # END-FOR diff --git a/src/drtsans/geometry.py b/src/drtsans/geometry.py index d3816f560..7f7e8c90e 100644 --- a/src/drtsans/geometry.py +++ b/src/drtsans/geometry.py @@ -131,7 +131,7 @@ def bank_workspace_index_range(input_workspace, component=""): for i in range(input_workspace.getNumberHistograms()): ids = input_workspace.getSpectrum(i).getDetectorIDs() if len(ids) > 1: - raise RuntimeError("do not know how to work with more than one " "detector per spectrum ({})".format(ids)) + raise RuntimeError("do not know how to work with more than one detector per spectrum ({})".format(ids)) if ids[0] == detector_id_first: first = i break @@ -786,7 +786,7 @@ def translate_sample_by_z(workspace, z): ) workspace = mtd[ws_name] logger.debug( - "Instrument sample position is moved to {}" "".format(workspace.getInstrument().getSample().getPos()) + "Instrument sample position is moved to {}".format(workspace.getInstrument().getSample().getPos()) ) # update the appropriate log @@ -836,7 +836,7 @@ def translate_detector_by_z(input_workspace, z=None, relative=True): update_log = True if (not relative) or (z != 0.0): logger.debug( - "Moving detector along Z = {} is relative = {} to component {}" "".format( + "Moving detector along Z = {} is relative = {} to component {}".format( z, relative, main_detector_name(input_workspace) ) ) diff --git a/src/drtsans/iq.py b/src/drtsans/iq.py index cd03581c6..edca6e3f3 100644 --- a/src/drtsans/iq.py +++ b/src/drtsans/iq.py @@ -117,14 +117,14 @@ def valid_wedge(min_angle, max_angle) -> List[Tuple[float, float]]: if diff < 180.0: return [(min_angle, max_angle)] raise ValueError( - "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} < 180" 
"".format( + "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} < 180".format( max_angle, min_angle, diff ) ) diff = min_angle - max_angle if diff <= 180: raise ValueError( - "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} <= 180" "".format( + "wedge angle is greater than 180 degrees: {:.1f} - {:.1f} = {:.1f} <= 180".format( min_angle, max_angle, diff ) ) diff --git a/src/drtsans/momentum_transfer.py b/src/drtsans/momentum_transfer.py index 99e5acb1e..e54da3797 100644 --- a/src/drtsans/momentum_transfer.py +++ b/src/drtsans/momentum_transfer.py @@ -83,7 +83,7 @@ def convert_to_q(ws, mode, resolution_function=None, **kwargs): wsh = mtd[str(ws)] if wsh.getAxis(0).getUnit().unitID() != "Wavelength": raise RuntimeError( - "Input workspace {} for calculate Q and resolution must be in unit Wavelength but not {}" "".format( + "Input workspace {} for calculate Q and resolution must be in unit Wavelength but not {}".format( wsh, wsh.getAxis(0).getUnit().unitID() ) ) diff --git a/src/drtsans/mono/biosans/api.py b/src/drtsans/mono/biosans/api.py index 6f859609b..94e1c885d 100644 --- a/src/drtsans/mono/biosans/api.py +++ b/src/drtsans/mono/biosans/api.py @@ -1516,8 +1516,8 @@ def _prepare_sample_transmission_ws(_sample_transmission): if debug_output: from mantid.simpleapi import SaveNexusProcessed - main_name = f'{form_output_name(processed_data_main).split(".")[0]}.nxs' - wing_name = f'{form_output_name(processed_data_wing).split(".")[0]}.nxs' + main_name = f"{form_output_name(processed_data_main).split('.')[0]}.nxs" + wing_name = f"{form_output_name(processed_data_wing).split('.')[0]}.nxs" # remove history to write less data and speed up I/O if reduction_config["removeAlgorithmHistory"]: RemoveWorkspaceHistory(processed_data_main) @@ -1536,7 +1536,7 @@ def _prepare_sample_transmission_ws(_sample_transmission): backend="mpl", ) # , imshow_kwargs={'norm': LogNorm(vmin=1)}) if reduction_config["has_midrange_detector"]: - midrange_name = f'{form_output_name(processed_data_midrange).split(".")[0]}.nxs' + midrange_name = f"{form_output_name(processed_data_midrange).split('.')[0]}.nxs" # remove history to write less data and speed up I/O if reduction_config["removeAlgorithmHistory"]: RemoveWorkspaceHistory(processed_data_midrange) diff --git a/src/drtsans/mono/biosans/cg3_spice_to_nexus.py b/src/drtsans/mono/biosans/cg3_spice_to_nexus.py index bb12e53f0..faec3fc22 100644 --- a/src/drtsans/mono/biosans/cg3_spice_to_nexus.py +++ b/src/drtsans/mono/biosans/cg3_spice_to_nexus.py @@ -126,7 +126,7 @@ def get_pid_range(self, bank_id): # Check input valid if bank_id < 1 or bank_id > self.num_banks: - raise RuntimeError(f"CG3 (BioSANS) has 88 banks indexed from 1 to 88. " f"Bank {bank_id} is out of range.") + raise RuntimeError(f"CG3 (BioSANS) has 88 banks indexed from 1 to 88. Bank {bank_id} is out of range.") # calculate starting PID if bank_id <= 24: @@ -212,7 +212,7 @@ def convert_spice_to_nexus( os.mkdir(output_dir) except (OSError, IOError) as dir_err: raise RuntimeError( - f"Output directory {output_dir} doesn't exist." 
f"Unable to create {output_dir} due to {dir_err}" + f"Output directory {output_dir} doesn't exist.Unable to create {output_dir} due to {dir_err}" ) # output file name diff --git a/src/drtsans/mono/convert_xml_to_nexus.py b/src/drtsans/mono/convert_xml_to_nexus.py index f41fbdeeb..169347683 100644 --- a/src/drtsans/mono/convert_xml_to_nexus.py +++ b/src/drtsans/mono/convert_xml_to_nexus.py @@ -224,7 +224,7 @@ def mask_spice_detector_pixels(self, pixel_index_list: List[int]): self._spice_detector_counts[pid] = 0 except IndexError as index_error: raise RuntimeError( - f"Pixel ID {pid} is out of range {self._spice_detector_counts.shape}. " f"FYI: {index_error}" + f"Pixel ID {pid} is out of range {self._spice_detector_counts.shape}. FYI: {index_error}" ) @staticmethod @@ -293,7 +293,7 @@ def _retrieve_meta_data(spice_file_name, das_spice_log_map): # check unit if unit != default_unit: raise RuntimeError( - f"SPICE log {spice_log_name} has unit {unit} different from " f"expected {default_unit}" + f"SPICE log {spice_log_name} has unit {unit} different from expected {default_unit}" ) das_log_values[nexus_log_name] = value, unit diff --git a/src/drtsans/mono/gpsans/api.py b/src/drtsans/mono/gpsans/api.py index 7b669bd41..ec4bfb8a3 100644 --- a/src/drtsans/mono/gpsans/api.py +++ b/src/drtsans/mono/gpsans/api.py @@ -321,9 +321,7 @@ def load_all_files( smearing_pixel_size_y=smearing_pixel_size_y_dict[meta_data.SAMPLE], ) logger.information( - "[META] Wavelength range is from {} to {}" "".format( - mtd[ws_name].readX(0)[0], mtd[ws_name].readX(0)[1] - ) + "[META] Wavelength range is from {} to {}".format(mtd[ws_name].readX(0)[0], mtd[ws_name].readX(0)[1]) ) # Apply mask for btp_params in default_mask: @@ -820,7 +818,7 @@ def prepare_data_workspaces( mask_btp = dict() if debug: # output masking information - logger.notice(f"mask panel: {mask_panel}\n" f"mask ws : {str(mask_ws)}\n" f"mask btp : {mask_btp}") + logger.notice(f"mask panel: {mask_panel}\nmask ws : {str(mask_ws)}\nmask btp : {mask_btp}") if mask_ws is not None: SaveNexusProcessed( InputWorkspace=mask_ws, @@ -1162,7 +1160,7 @@ def reduce_single_configuration(loaded_ws, reduction_input, prefix="", skip_nan= } # auto-aniso returns all of the wedges symmetric_wedges = False - logger.debug(f'Wedge peak search window size factor: {autoWedgeOpts["peak_search_window_size_factor"]}') + logger.debug(f"Wedge peak search window size factor: {autoWedgeOpts['peak_search_window_size_factor']}") fbc_options = fbc_options_json(reduction_input) xc, yc, fit_results = find_beam_center(loaded_ws.center, **fbc_options) @@ -1313,9 +1311,7 @@ def reduce_single_configuration(loaded_ws, reduction_input, prefix="", skip_nan= logger.notice(f"Auto wedge options: {autoWedgeOpts}") autoWedgeOpts["debug_dir"] = output_dir wedges = getWedgeSelection(iq2d_main_in, **autoWedgeOpts) - logger.notice( - f"found wedge angles:\n" f" peak: {wedges[0]}\n" f" background: {wedges[1]}" - ) + logger.notice(f"found wedge angles:\n peak: {wedges[0]}\n background: {wedges[1]}") # sanity check assert len(wedges) == 2, f"Auto-wedges {wedges} shall have 2 2-tuples" diff --git a/src/drtsans/mono/gpsans/cg2_spice_to_nexus.py b/src/drtsans/mono/gpsans/cg2_spice_to_nexus.py index fbf89d9af..503950a1e 100644 --- a/src/drtsans/mono/gpsans/cg2_spice_to_nexus.py +++ b/src/drtsans/mono/gpsans/cg2_spice_to_nexus.py @@ -43,7 +43,7 @@ def get_pid_range(self, bank_id): """ # Check input valid if bank_id < 1 or bank_id > 48: - raise RuntimeError(f"CG2 (GP-SANS) has 88 banks indexed from 1 to 48. 
" f"Bank {bank_id} is out of range.") + raise RuntimeError(f"CG2 (GP-SANS) has 88 banks indexed from 1 to 48. Bank {bank_id} is out of range.") # calculate starting PID if bank_id <= 24: diff --git a/src/drtsans/mono/load.py b/src/drtsans/mono/load.py index 18a5f540c..ecc4a20ce 100644 --- a/src/drtsans/mono/load.py +++ b/src/drtsans/mono/load.py @@ -296,7 +296,7 @@ def set_sample_detector_position( raise RuntimeError( f"Workspace {str(ws)}: after loading and initial setup, DAS SDD ({das_sdd})" f"is not equal to calculated/real SDD ({real_sdd}) by proportion as " - f"{abs(das_sdd - real_sdd)/das_sdd}" + f"{abs(das_sdd - real_sdd) / das_sdd}" ) # Get original sample detector distance: find expected SDD for further verification @@ -318,7 +318,7 @@ def set_sample_detector_position( f"Prior to any geometry correction:\n" f"Sample to detector distance = {sample_detector_distance(ws, search_logs=False)}" f"(calculated) vs {sample_detector_distance(ws, search_logs=True)} (meta) mm.\n" - f' SampleToSi = {logs.find_log_with_units(sample_to_si_window_name, unit="mm")} mm\n' + f" SampleToSi = {logs.find_log_with_units(sample_to_si_window_name, unit='mm')} mm\n" f"Overwrite Values = {sample_si_window_overwrite_value}, " f"{sample_detector_distance_overwrite_value}\n" ) @@ -346,7 +346,7 @@ def set_sample_detector_position( # Check current instrument setup and meta data (sample logs) logger.notice( - "{} Sample to detector distance = {} (calculated) vs {} (meta) mm" "".format( + "{} Sample to detector distance = {} (calculated) vs {} (meta) mm".format( str(ws), sample_detector_distance(ws, search_logs=False), sample_detector_distance(ws, search_logs=True), @@ -363,7 +363,7 @@ def set_sample_detector_position( prior_geom_info += ( f"Result from geometry operation:\n" f"Sample position = {ws.getInstrument().getSample().getPos()}\n" - f'SampleToSi = {logs.find_log_with_units(sample_to_si_window_name, unit="mm")}' + f"SampleToSi = {logs.find_log_with_units(sample_to_si_window_name, unit='mm')}" f"mm (From Log)\n" ) # add detector information diff --git a/src/drtsans/mono/meta_data.py b/src/drtsans/mono/meta_data.py index 3b81c61f0..863cf1ede 100644 --- a/src/drtsans/mono/meta_data.py +++ b/src/drtsans/mono/meta_data.py @@ -113,7 +113,7 @@ def parse_json_meta_data( except KeyError as key_error: # Required value cannot be found raise KeyError( - "JSON file shall have key as [configuration][{}]. Error message: {}" "".format(meta_name, key_error) + "JSON file shall have key as [configuration][{}]. Error message: {}".format(meta_name, key_error) ) return overwrite_dict @@ -149,9 +149,7 @@ def _parse_new_meta_data_json(reduction_input, meta_name, unit_conversion_factor except KeyError as key_error: # Required value cannot be found raise KeyError( - "JSON file shall have key as configuration:{}:{}. Error message: {}" "".format( - meta_name, run_type, key_error - ) + "JSON file shall have key as configuration:{}:{}. Error message: {}".format(meta_name, run_type, key_error) ) meta_value_dict[SAMPLE] = overwrite_value @@ -180,16 +178,14 @@ def _parse_new_meta_data_json(reduction_input, meta_name, unit_conversion_factor except ValueError as value_error: # Overwritten value error raise RuntimeError( - "JSON value of key configuration:{}:{} has a value error. Error message: {}" "".format( + "JSON value of key configuration:{}:{} has a value error. 
Error message: {}".format( meta_name, run_type, value_error ) ) except KeyError as key_error: # Required value cannot be found raise KeyError( - "JSON file shall have key as configuration:{}:{}. Error message: {}" "".format( - meta_name, run_type, key_error - ) + "JSON file shall have key as configuration:{}:{}. Error message: {}".format(meta_name, run_type, key_error) ) @@ -331,7 +327,7 @@ def set_meta_data( pass else: raise RuntimeError( - "Pixel size X ({}) and Y ({}) must be set together" "".format(smearing_pixel_size_x, smearing_pixel_size_y) + "Pixel size X ({}) and Y ({}) must be set together".format(smearing_pixel_size_x, smearing_pixel_size_y) ) # Add log value @@ -387,13 +383,11 @@ def get_sample_detector_offset( # read sample log for SampleToSi and convert to meter from mm sample_to_si = sample_logs.find_log_with_units(sample_si_meta_name, "mm") * 1e-3 logger.notice( - "[META INIT] User SSD = {}, SWD = {}," "".format( - overwrite_sample_detector_distance, overwrite_sample_si_distance - ) + "[META INIT] User SSD = {}, SWD = {},".format(overwrite_sample_detector_distance, overwrite_sample_si_distance) ) logger.notice("[META] EPICS Sample to Si = {} meter".format(sample_to_si)) logger.notice( - "[META] Hardcoded Sample to nominal distance = {} meter" "".format(zero_sample_offset_sample_si_distance) + "[META] Hardcoded Sample to nominal distance = {} meter".format(zero_sample_offset_sample_si_distance) ) # Offsets: shift both sample and detector to conserve sample-detector distance @@ -417,9 +411,9 @@ def get_sample_detector_offset( # Shift the sample position only without moving detector overwrite_offset = sample_to_si - overwrite_sample_si_distance logger.notice( - "[META-Overwrite SSD] SampleToSi = {}, SampleToSiOverwrite = {}, " - "Original SampleOffset = {}" - "".format(sample_to_si, overwrite_sample_si_distance, sample_offset) + "[META-Overwrite SSD] SampleToSi = {}, SampleToSiOverwrite = {}, Original SampleOffset = {}".format( + sample_to_si, overwrite_sample_si_distance, sample_offset + ) ) sample_offset += overwrite_offset real_sample_det_distance -= overwrite_offset diff --git a/src/drtsans/mono/spice_data.py b/src/drtsans/mono/spice_data.py index 54c8b7336..f98a654e8 100644 --- a/src/drtsans/mono/spice_data.py +++ b/src/drtsans/mono/spice_data.py @@ -104,9 +104,7 @@ def unique_nexus_name(self, nexus_dir=None, raise_if_not_exist=False): nexus_path = os.path.join(nexus_dir, base_nexus_name) if raise_if_not_exist and not os.path.exists(nexus_path): - raise RuntimeError( - f"Spice converted Nexus file {base_nexus_name} does not exist in " f"directory {nexus_dir}" - ) + raise RuntimeError(f"Spice converted Nexus file {base_nexus_name} does not exist in directory {nexus_dir}") return nexus_path diff --git a/src/drtsans/path.py b/src/drtsans/path.py index 1c85fffd8..186dfb5eb 100644 --- a/src/drtsans/path.py +++ b/src/drtsans/path.py @@ -154,7 +154,7 @@ def abspath(path: str, instrument="", ipts="", directory=None, search_archive=Tr if os.path.exists(option): return option - raise RuntimeError("None of the locations suggested by ONCat contain " 'existing files for "{}"'.format(path)) + raise RuntimeError('None of the locations suggested by ONCat contain existing files for "{}"'.format(path)) def abspaths(runnumbers, instrument="", ipts="", directory=None, search_archive=True): diff --git a/src/drtsans/pixel_calibration.py b/src/drtsans/pixel_calibration.py index 429f3c237..79a6222ea 100644 --- a/src/drtsans/pixel_calibration.py +++ b/src/drtsans/pixel_calibration.py @@ -324,7 
+324,7 @@ def compose_table_name(cls, metadata): str """ m = metadata # handy shortcut - return f'{m["caltype"].lower()}_{m["instrument"]}_{m["component"]}_{str(m["daystamp"])}' + return f"{m['caltype'].lower()}_{m['instrument']}_{m['component']}_{str(m['daystamp'])}" @classmethod def load(cls, database, caltype, instrument, component, daystamp, output_workspace=None): @@ -1106,7 +1106,7 @@ def fit_positions( # evaluate the heights. All should be positive calculated_heights = np.polynomial.polynomial.polyval(np.arange(tube_pixels), deriv_coefficients) if permissive is False and calculated_heights[calculated_heights <= 0.0].size > 0: - raise ValueError(f"Some of the calculated heights are negative.\n" f"Heights = {calculated_heights}") + raise ValueError(f"Some of the calculated heights are negative.\nHeights = {calculated_heights}") except Exception: coefficients = np.ones(int(order)) * np.nan calculated_positions = np.ones(tube_pixels) * np.nan diff --git a/src/drtsans/prepare_sensivities_correction.py b/src/drtsans/prepare_sensivities_correction.py index ba99a611c..e5315abc2 100644 --- a/src/drtsans/prepare_sensivities_correction.py +++ b/src/drtsans/prepare_sensivities_correction.py @@ -167,8 +167,7 @@ def _set_mask_value(flood_workspace, det_mask_array, use_moving_detector_method= # Array if len(problematic_pixels) > 0: raise RuntimeError( - f"Impossible case: pixels {problematic_pixels} has local detector mask is on, " - f"but total mask is off" + f"Impossible case: pixels {problematic_pixels} has local detector mask is on, but total mask is off" ) logger.debug( @@ -726,7 +725,7 @@ def execute( for i in range(num_workspaces_set): flood_ws_i = self._prepare_flood_data(self._flood_runs[i], beam_centers[i], self._dark_current_runs[i]) flood_workspaces.append(flood_ws_i) - logger.notice(f"Load {i}-th flood run {self._flood_runs[i]} to " f"{flood_ws_i}") + logger.notice(f"Load {i}-th flood run {self._flood_runs[i]} to {flood_ws_i}") # Retrieve masked detectors before masking the beam center. These are termed "bad pixels" if not use_moving_detector_method: diff --git a/src/drtsans/redparams.py b/src/drtsans/redparams.py index c43d29b50..f4f24662e 100644 --- a/src/drtsans/redparams.py +++ b/src/drtsans/redparams.py @@ -536,7 +536,7 @@ def __str__(self): items = [] for each in self.schema["anyOf"]: items.append(each["type"]) - msg += f'\nType: {", ".join(items)}' + msg += f"\nType: {', '.join(items)}" if "enum" in self.schema.keys(): msg += f"\nType: enum, values: {self.schema['enum']}" if "preferredType" in self.schema.keys(): @@ -783,7 +783,7 @@ def _initialize_parameters(self, schema=None, parameters=None): available_properties = list(schema.get("properties", {}).keys()) available_properties.extend(list(schema.get("additionalProperties", {}).keys())) not_found_message = ( - f"Parameter {name} not found in the schema. " f"Available properties are: {available_properties}" + f"Parameter {name} not found in the schema. Available properties are: {available_properties}" ) if self._permissible: logger.warning(not_found_message) @@ -1206,7 +1206,7 @@ def _validate_wedge_sources(self, validator, value, instance, schema): source_set_valid_found = True # all instances in the source set are not empty. 
It's a valid set if source_set_valid_found is False: yield jsonschema.ValidationError( - f"We cannot define the wedge angles given the current" f"values or parameters {value}" + f"We cannot define the wedge angles given the current values or parameters {value}" ) def _validate_flux_file_tof(self, validator, value, instance, schema): diff --git a/src/drtsans/reductionlog.py b/src/drtsans/reductionlog.py index 17d3e8979..ec656ecbf 100644 --- a/src/drtsans/reductionlog.py +++ b/src/drtsans/reductionlog.py @@ -400,12 +400,12 @@ def savereductionlog(filename="", detectordata=None, **kwargs): if type(detectordata) is not dict: raise RuntimeError( - "detectordata has the wrong type. It should be a dictionary " "and not a {}".format(type(detectordata)) + "detectordata has the wrong type. It should be a dictionary and not a {}".format(type(detectordata)) ) for _slice_name in detectordata.keys(): if type(detectordata[_slice_name]) is not dict: raise RuntimeError( - "detectordata value has the wrong type. It should be a dictionary " "and not a {}".format( + "detectordata value has the wrong type. It should be a dictionary and not a {}".format( type(detectordata[_slice_name]) ) ) @@ -428,7 +428,7 @@ def savereductionlog(filename="", detectordata=None, **kwargs): if logslicedata: if type(logslicedata) is not dict: raise RuntimeError( - "logslicedata has the wrong type. It should a dictionary " "and not a {}".format(type(logslicedata)) + "logslicedata has the wrong type. It should a dictionary and not a {}".format(type(logslicedata)) ) if len(logslicedata.keys()) > len(detectordata.keys()): diff --git a/src/drtsans/resolution.py b/src/drtsans/resolution.py index 446ab58ad..a16576e48 100644 --- a/src/drtsans/resolution.py +++ b/src/drtsans/resolution.py @@ -61,7 +61,7 @@ def __str__(self): Nice output string :return: """ - out = "L1 = {} (m)\nSample-Detector-Center-Distance (L2)= {} (m)\n" "".format( + out = "L1 = {} (m)\nSample-Detector-Center-Distance (L2)= {} (m)\n".format( self.l1, self._sample_det_center_dist ) out += "Source aperture radius (R1) = {} (m)\n".format(self._source_aperture) diff --git a/src/drtsans/samplelogs.py b/src/drtsans/samplelogs.py index dc180cab1..51161c560 100644 --- a/src/drtsans/samplelogs.py +++ b/src/drtsans/samplelogs.py @@ -144,9 +144,9 @@ def periodic_index_log( # then truncate to the length of times, then cast to list entries = np.tile(values_in_period, period_count)[: len(times)].tolist() - assert len(times) == len( - entries - ), f"times and entries must have the same length: len(times) {len(times)} != len(entries) {len(entries)}" + assert len(times) == len(entries), ( + f"times and entries must have the same length: len(times) {len(times)} != len(entries) {len(entries)}" + ) return time_series(name, times, entries, run_start, unit="") @@ -307,7 +307,7 @@ def from_string(s): if len(finders) == 0: # In case no items found raise RuntimeError( - 'Input "other" of value {} is not supported to retrieve Mantid ' '"run" object'.format(other) + 'Input "other" of value {} is not supported to retrieve Mantid "run" object'.format(other) ) finder = finders[0] return finder(other) diff --git a/src/drtsans/sensitivity_correction_moving_detectors.py b/src/drtsans/sensitivity_correction_moving_detectors.py index e13bc452e..4688fab9b 100644 --- a/src/drtsans/sensitivity_correction_moving_detectors.py +++ b/src/drtsans/sensitivity_correction_moving_detectors.py @@ -100,7 +100,7 @@ def _mask_zero_count_pixel(flood_data_matrix, flood_sigma_matrix): """ # get the zero count elments
zero_count_elements = flood_data_matrix < 1e-12 - logger.notice(f"Input flood runs: total {len(np.where(zero_count_elements)[0])} are " f"masked") + logger.notice(f"Input flood runs: total {len(np.where(zero_count_elements)[0])} are masked") # set to NaN flood_data_matrix[zero_count_elements] = np.nan diff --git a/src/drtsans/sensitivity_correction_patch.py b/src/drtsans/sensitivity_correction_patch.py index 1b9c038f3..82214ee17 100644 --- a/src/drtsans/sensitivity_correction_patch.py +++ b/src/drtsans/sensitivity_correction_patch.py @@ -151,7 +151,7 @@ def calculate_sensitivity_correction( # This shall be an option later if len(xx) < min_detectors_per_tube: logger.error( - "Skipping tube with indices {} with {} non-masked value. Too many " "masked or dead pixels.".format( + "Skipping tube with indices {} with {} non-masked value. Too many masked or dead pixels.".format( j, len(xx) ) ) diff --git a/src/drtsans/tof/eqsans/api.py b/src/drtsans/tof/eqsans/api.py index 3b5877e2f..fe241c2fc 100644 --- a/src/drtsans/tof/eqsans/api.py +++ b/src/drtsans/tof/eqsans/api.py @@ -803,9 +803,7 @@ def reduce_single_configuration( if correction_setup.do_elastic_correction and correction_setup.elastic_reference: # sanity check - assert loaded_ws.elastic_reference.data, ( - f"Reference run is not loaded: " f"{correction_setup.elastic_reference}" - ) + assert loaded_ws.elastic_reference.data, f"Reference run is not loaded: {correction_setup.elastic_reference}" ############################################## # PROCESS SAMPLE AND BACKGROUND TRANSMISSIONS @@ -1128,7 +1126,7 @@ def process_auto_wedge( ] # this is a parameter from json file wedges = getWedgeSelection(iq2d_input, **auto_wedge_setup) - logger.notice(f"found wedge angles:\n" f" peak: {wedges[0]}\n" f" background: {wedges[1]}") + logger.notice(f"found wedge angles:\n peak: {wedges[0]}\n background: {wedges[1]}") # sanity check assert len(wedges) == 2, f"Auto-wedges {wedges} shall have 2 2-tuples" # set automated wedge to reduction configuration for correct plotting. 
diff --git a/src/drtsans/tof/eqsans/correction_api.py b/src/drtsans/tof/eqsans/correction_api.py index 26bceca3d..adac81128 100644 --- a/src/drtsans/tof/eqsans/correction_api.py +++ b/src/drtsans/tof/eqsans/correction_api.py @@ -203,7 +203,7 @@ def __init__( if trans_run_number is None and trans_value is None: raise RuntimeError("Either transmission run or transmission value shall be given.") elif trans_run_number and trans_value: - raise RuntimeError("Either transmission run or transmission value can be given, but " "not both") + raise RuntimeError("Either transmission run or transmission value can be given, but not both") # Background self.background_run_number = None @@ -224,9 +224,7 @@ def set_background( if trans_run_number is None and trans_value is None: raise RuntimeError("Either background transmission run or transmission value shall be given.") elif trans_run_number and trans_value: - raise RuntimeError( - "Either background transmission run or transmission value can be given, but " "not both" - ) + raise RuntimeError("Either background transmission run or transmission value can be given, but not both") def parse_correction_config( diff --git a/src/drtsans/tof/eqsans/elastic_reference_normalization.py b/src/drtsans/tof/eqsans/elastic_reference_normalization.py index 158e71247..6231d4c40 100644 --- a/src/drtsans/tof/eqsans/elastic_reference_normalization.py +++ b/src/drtsans/tof/eqsans/elastic_reference_normalization.py @@ -494,7 +494,7 @@ def determine_reference_wavelength_q1d_mesh( """ # Sanity check assert wavelength_vec.shape[0] == intensity_array.shape[1], ( - f"Wavelength dimension = {wavelength_vec.shape} ," f"Intensity dimension = {intensity_array.shape}" + f"Wavelength dimension = {wavelength_vec.shape} ,Intensity dimension = {intensity_array.shape}" ) # Minimum wavelength bin is the reference wavelength diff --git a/src/drtsans/tof/eqsans/incoherence_correction_1d.py b/src/drtsans/tof/eqsans/incoherence_correction_1d.py index 84cbf830d..7d4ff2ddf 100644 --- a/src/drtsans/tof/eqsans/incoherence_correction_1d.py +++ b/src/drtsans/tof/eqsans/incoherence_correction_1d.py @@ -471,9 +471,9 @@ def correct_intensity_error( assert intensity_array.shape == error_array.shape assert wavelength_vec.shape[0] == intensity_array.shape[1] assert q_vec.shape[0] == error_array.shape[0] - assert ( - len(b_array2d.shape) == 2 and b_array2d.shape[0] == 2 - ), f"Expected input B and B error but not of shape {b_array2d.shape}" + assert len(b_array2d.shape) == 2 and b_array2d.shape[0] == 2, ( + f"Expected input B and B error but not of shape {b_array2d.shape}" + ) assert b_array2d.shape[1] == wavelength_vec.shape[0] # Init data structure diff --git a/src/drtsans/tof/eqsans/meta_data.py b/src/drtsans/tof/eqsans/meta_data.py index b9dfa29a4..d4f0e9d66 100644 --- a/src/drtsans/tof/eqsans/meta_data.py +++ b/src/drtsans/tof/eqsans/meta_data.py @@ -72,7 +72,7 @@ def set_meta_data( pass else: raise RuntimeError( - "Pixel size X ({}) and Y ({}) must be set together" "".format(smearing_pixel_size_x, smearing_pixel_size_y) + "Pixel size X ({}) and Y ({}) must be set together".format(smearing_pixel_size_x, smearing_pixel_size_y) ) # Add log value diff --git a/src/drtsans/tof/eqsans/reduction_api.py b/src/drtsans/tof/eqsans/reduction_api.py index 02f8ffd20..e65ea8ef2 100644 --- a/src/drtsans/tof/eqsans/reduction_api.py +++ b/src/drtsans/tof/eqsans/reduction_api.py @@ -330,9 +330,7 @@ def bin_i_with_correction( # Check due to functional limitation assert isinstance(iq1d_main_wl, list), f"Output I(Q) must be 
a list but not a {type(iq1d_main_wl)}" if len(iq1d_main_wl) != 1: - raise NotImplementedError( - f"Not expected that there are more than 1 IQmod main but " f"{len(iq1d_main_wl)}" - ) + raise NotImplementedError(f"Not expected that there are more than 1 IQmod main but {len(iq1d_main_wl)}") else: ... @@ -421,7 +419,7 @@ def bin_i_with_correction( finite_iq2d = iq2d_main_wl.be_finite() # Bin binned I(Q1D, wl) and and binned I(Q2D, wl) in wavelength space assert len(iq1d_main_wl) == 1, ( - f"It is assumed that output I(Q) list contains 1 I(Q)" f" but not {len(iq1d_main_wl)}" + f"It is assumed that output I(Q) list contains 1 I(Q) but not {len(iq1d_main_wl)}" ) # Bin output in wavelength space diff --git a/tests/conftest.py b/tests/conftest.py index c1650849a..29aa19b26 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -502,7 +502,7 @@ def _getDataDimensions(req_params): Ny = int(Ny) if intensity.size % (Nx * Ny) != 0: raise RuntimeError( - "Supplied Nx={}, Ny={} not compatible with " "intensities[{}]".format(Nx, Ny, intensity.shape) + "Supplied Nx={}, Ny={} not compatible with intensities[{}]".format(Nx, Ny, intensity.shape) ) else: return Nx, Ny, int(intensity.size / (Nx * Ny)) diff --git a/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py b/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py index 14593a6fa..7f1e97cfe 100644 --- a/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py +++ b/tests/integration/drtsans/mono/biosans/test_create_event_nexus.py @@ -422,7 +422,7 @@ def verify_reduction_results(sample_names, output_dir, gold_path, title, prefix, try: compare_reduced_iq(output_log_file, gold_log_file, title_i, prefix, rel_tol=rel_tol) except AssertionError as unmatched_error: - unmatched_errors = "Testing output {} is different from gold result {}:\n{}" "".format( + unmatched_errors = "Testing output {} is different from gold result {}:\n{}".format( output_log_file, gold_log_file, unmatched_error ) # END-FOR diff --git a/tests/integration/drtsans/mono/biosans/test_load.py b/tests/integration/drtsans/mono/biosans/test_load.py index 76e70de00..ae57fde84 100644 --- a/tests/integration/drtsans/mono/biosans/test_load.py +++ b/tests/integration/drtsans/mono/biosans/test_load.py @@ -65,14 +65,14 @@ def test_load_all_files(has_sns_mount, reference_dir): # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 for ws in [sample_run, beam_center_run, bkgd_run, empty_trans_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx(-0.12952, 0.000004), ( - "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(-0.12952, 0.000004), "{} has a wrong sample Si-window distance {}".format( + str(ws), sample_pos_z ) for ws in [dark_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx(0.0000, 0.000004), ( - "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(0.0000, 0.000004), "{} has a wrong sample Si-window distance {}".format( + str(ws), sample_pos_z ) # Verify sample to detector distance with default setup: @@ -93,12 +93,12 @@ def test_load_all_files(has_sns_mount, reference_dir): sample_log_i = SampleLogs(ws) pixel_size_x = sample_log_i["smearingPixelSizeX"].value pixel_size_y = sample_log_i["smearingPixelSizeY"].value - assert pixel_size_x == pytest.approx( - 1.23456 * 1.0e-3, 1.0e-7 - ), "{}-th workspace: 
Pixel size X {} (m) shall be equal to 1.23456 mm".format(ws_index, pixel_size_x) - assert pixel_size_y == pytest.approx( - 2.34567 * 1.0e-3, 1.0e-7 - ), "{}-th workspace: Pixel size X {} (m) shall be equal to 2.34567 mm".format(ws_index, pixel_size_y) + assert pixel_size_x == pytest.approx(1.23456 * 1.0e-3, 1.0e-7), ( + "{}-th workspace: Pixel size X {} (m) shall be equal to 1.23456 mm".format(ws_index, pixel_size_x) + ) + assert pixel_size_y == pytest.approx(2.34567 * 1.0e-3, 1.0e-7), ( + "{}-th workspace: Pixel size X {} (m) shall be equal to 2.34567 mm".format(ws_index, pixel_size_y) + ) # Check center wave length and spread for ws_index, ws in enumerate([sample_run, beam_center_run, bkgd_run, empty_trans_run]): @@ -106,11 +106,11 @@ def test_load_all_files(has_sns_mount, reference_dir): wave_length = sample_log_i["wavelength"].value[0] wave_length_spread = sample_log_i["wavelength_spread"].value[0] # overwriting value - assert wave_length == pytest.approx( - 1.23, 1.0e-7 - ), "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) + assert wave_length == pytest.approx(1.23, 1.0e-7), ( + "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) + ) assert wave_length_spread == pytest.approx(0.46, 1.0e-7), ( - "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( + "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom".format( ws_index, wave_length_spread ) ) @@ -120,11 +120,11 @@ def test_load_all_files(has_sns_mount, reference_dir): wave_length = sample_log_i["wavelength"].value[0] wave_length_spread = sample_log_i["wavelength_spread"].value[0] # original value - assert wave_length == pytest.approx( - 6.00881338, 1.0e-7 - ), "{}-th workspace: wave length {} shall be equal to 6.00881338 angstrom".format(ws_index, wave_length) + assert wave_length == pytest.approx(6.00881338, 1.0e-7), ( + "{}-th workspace: wave length {} shall be equal to 6.00881338 angstrom".format(ws_index, wave_length) + ) assert wave_length_spread == pytest.approx(0.1323529411, 1.0e-7), ( - "{}-th workspace: wave length spread {} shall be equal to 0.13235294 angstrom" "".format( + "{}-th workspace: wave length spread {} shall be equal to 0.13235294 angstrom".format( ws_index, wave_length_spread ) ) diff --git a/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py b/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py index 0fa60723b..68461766a 100644 --- a/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py +++ b/tests/integration/drtsans/mono/biosans/test_overwrite_geometry_meta_data.py @@ -388,7 +388,7 @@ def verify_reduction_results(sample_names, output_dir, gold_path, title, prefix) try: compare_reduced_iq(output_log_file, gold_log_file, title_i, prefix) except AssertionError as unmatched_error: - unmatched_errors += "Testing output {} does not match gold result {}:\n{}\n" "".format( + unmatched_errors += "Testing output {} does not match gold result {}:\n{}\n".format( output_log_file, gold_log_file, unmatched_error ) # END-FOR diff --git a/tests/integration/drtsans/mono/biosans/test_prepare_sensitivities_spice.py b/tests/integration/drtsans/mono/biosans/test_prepare_sensitivities_spice.py index e368324e0..4fbe6b71a 100644 --- a/tests/integration/drtsans/mono/biosans/test_prepare_sensitivities_spice.py +++ b/tests/integration/drtsans/mono/biosans/test_prepare_sensitivities_spice.py @@ -13,7 +13,7 @@ 
warnings.simplefilter(action="ignore", category=FutureWarning) workspaces = [ - "BC_CG3_/SNS/EQSANS/shared/sans-backend/data/ornl/sans/hfir/biosans/CG3_054900200001.nxs.h5" "BIOSANS_54900200001", + "BC_CG3_/SNS/EQSANS/shared/sans-backend/data/ornl/sans/hfir/biosans/CG3_054900200001.nxs.h5BIOSANS_54900200001", "BIOSANS_54900200001_sensitivity", "BIOSANS_54900200001_sensitivity_new", "BIOSANS_54900220001", diff --git a/tests/integration/drtsans/mono/biosans/test_reduction_spice.py b/tests/integration/drtsans/mono/biosans/test_reduction_spice.py index e4cb4c989..6bf435df3 100644 --- a/tests/integration/drtsans/mono/biosans/test_reduction_spice.py +++ b/tests/integration/drtsans/mono/biosans/test_reduction_spice.py @@ -196,7 +196,7 @@ def verify_reduction_results(sample_names, test_log_files, gold_files, title, pr try: compare_reduced_iq(output_log_file, gold_log_file, title_i, prefix) except AssertionError as unmatched_error: - unmatched_errors += "Testing output {} does not match gold result {}:\n{}\n" "".format( + unmatched_errors += "Testing output {} does not match gold result {}:\n{}\n".format( output_log_file, gold_log_file, unmatched_error ) # END-FOR diff --git a/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py b/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py index cb5c20f11..d69386719 100644 --- a/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py +++ b/tests/integration/drtsans/mono/gpsans/test_find_beam_center.py @@ -39,7 +39,7 @@ def test_gpsans_find_beam_center(datarepo_dir): beam_center_shift = np.sqrt((center_x - det_center[0]) ** 2 + (center_y - det_center[1]) ** 2) assert beam_center_shift == pytest.approx(0.400, abs=0.007), ( - "Beam center shift {} to {} is beyond" "0.4 +/- 7E-3".format(beam_center, det_center) + "Beam center shift {} to {} is beyond 0.4 +/- 7E-3".format(beam_center, det_center) ) # cleanup diff --git a/tests/integration/drtsans/mono/gpsans/test_load.py b/tests/integration/drtsans/mono/gpsans/test_load.py index 447de0ceb..d9daef3fa 100644 --- a/tests/integration/drtsans/mono/gpsans/test_load.py +++ b/tests/integration/drtsans/mono/gpsans/test_load.py @@ -72,8 +72,8 @@ def test_load_all_files(datarepo_dir): # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/issues/542#note_156296 for ws in [sample_run, sample_trans_run, bkgd_run, bkgd_trans_run]: sample_pos_z = ws.getInstrument().getSample().getPos()[2] - assert sample_pos_z == pytest.approx(-0.23456, 0.000004), ( - "{} has a wrong sample Si-window distance {}" "".format(str(ws), sample_pos_z) + assert sample_pos_z == pytest.approx(-0.23456, 0.000004), "{} has a wrong sample Si-window distance {}".format( + str(ws), sample_pos_z ) # Verify sample to detector distance with default setup: @@ -89,12 +89,12 @@ def test_load_all_files(datarepo_dir): sample_log_i = SampleLogs(ws) pixel_size_x = sample_log_i["smearingPixelSizeX"].value pixel_size_y = sample_log_i["smearingPixelSizeY"].value - assert pixel_size_x == pytest.approx( - 1.2345 * 1.0e-3, 1.0e-7 - ), "{}-th workspace: Pixel size X {} (m) shall be equal to 1.2345 mm".format(ws_index, pixel_size_x) - assert pixel_size_y == pytest.approx( - 2.3456 * 1.0e-3, 1.0e-7 - ), "{}-th workspace: Pixel size X {} (m) shall be equal to 2.3456 mm".format(ws_index, pixel_size_y) + assert pixel_size_x == pytest.approx(1.2345 * 1.0e-3, 1.0e-7), ( + "{}-th workspace: Pixel size X {} (m) shall be equal to 1.2345 mm".format(ws_index, pixel_size_x) + ) + assert pixel_size_y == pytest.approx(2.3456 * 1.0e-3, 1.0e-7), ( + "{}-th
workspace: Pixel size X {} (m) shall be equal to 2.3456 mm".format(ws_index, pixel_size_y) + ) # Check center wave length and spread for ws_index, ws in enumerate([sample_run, sample_trans_run, bkgd_run, bkgd_trans_run]): @@ -102,11 +102,11 @@ def test_load_all_files(datarepo_dir): wave_length = sample_log_i["wavelength"].value[0] wave_length_spread = sample_log_i["wavelength_spread"].value[0] # overwriting value - assert wave_length == pytest.approx( - 1.23, 1.0e-7 - ), "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) + assert wave_length == pytest.approx(1.23, 1.0e-7), ( + "{}-th workspace: wave length {} shall be equal to 1.23 angstrom".format(ws_index, wave_length) + ) assert wave_length_spread == pytest.approx(0.1323529411, 1.0e-7), ( - "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom" "".format( + "{}-th workspace: wave length spread {} shall be equal to 0.46 angstrom".format( ws_index, wave_length_spread ) ) diff --git a/tests/integration/drtsans/mono/gpsans/test_pixel_map_spice.py b/tests/integration/drtsans/mono/gpsans/test_pixel_map_spice.py index 3812317a0..578fe9c32 100644 --- a/tests/integration/drtsans/mono/gpsans/test_pixel_map_spice.py +++ b/tests/integration/drtsans/mono/gpsans/test_pixel_map_spice.py @@ -78,9 +78,9 @@ def test_pixel_calibration(datarepo_dir, temp_directory): datarepo_dir.gpsans, f"calibrations/CG2_Pixel_Calibration_Expected_{last_pt - first_pt + 1}.nxs", ) - assert os.path.exists( - expected_calibration_table - ), f"Gold result (file) {expected_calibration_table} cannot be found." + assert os.path.exists(expected_calibration_table), ( + f"Gold result (file) {expected_calibration_table} cannot be found." + ) compare_pixel_calibration_files(calibration_table, expected_calibration_table) diff --git a/tests/integration/drtsans/mono/test_load.py b/tests/integration/drtsans/mono/test_load.py index df1e965d5..84f64e71d 100644 --- a/tests/integration/drtsans/mono/test_load.py +++ b/tests/integration/drtsans/mono/test_load.py @@ -35,7 +35,7 @@ def test_load_gpsans(datarepo_dir, clean_workspace): print("[TEST INFO] SampleToSi = {} mm".format(logs.find_log_with_units("CG2:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws, unit="m", search_logs=False) print( - "[TEST INFO] Sample to detector distance = {} /{} meter" "".format( + "[TEST INFO] Sample to detector distance = {} /{} meter".format( raw_sample_det_distance, sample_detector_distance(ws, unit="m", log_key="sample_detector_distance", search_logs=True), ) @@ -92,7 +92,7 @@ def test_load_biosans(datarepo_dir, clean_workspace): print("[TEST INFO] (Raw) sampleToSi = {} mm".format(logs.find_log_with_units("CG3:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws) print( - "[TEST INFO] (Raw) sample to detector distance = {} /{} meter" "".format( + "[TEST INFO] (Raw) sample to detector distance = {} /{} meter".format( raw_sample_det_distance, sample_detector_distance(ws, log_key="sample_detector_distance", search_logs=True), ) @@ -102,7 +102,7 @@ def test_load_biosans(datarepo_dir, clean_workspace): # sample and detector offsets can only be retrieved from a loaded workspace # This is a technical debt sample_offset, detector_offset = get_sample_detector_offset(ws, "CG3:CS:SampleToSi", 71.0 * 1e-3) - print("[TEST INFO] Sample offset = {}, Detector offset = {}" "".format(sample_offset, detector_offset)) + print("[TEST INFO] Sample offset = {}, Detector offset = {}".format(sample_offset, 
detector_offset)) # Verify: No sample offset from nominal position (origin) assert sample_offset == pytest.approx(0.0, 1e-12) @@ -162,7 +162,7 @@ def test_load_biosans_sample_off_nominal(datarepo_dir, clean_workspace): # Calculate offset without any overwriting sample_offset, detector_offset = get_sample_detector_offset(ws, "CG3:CS:SampleToSi", 71.0 * 1e-3) - print("[TEST INFO 2] Sample offset = {}, Detector offset = {}" "".format(sample_offset, detector_offset)) + print("[TEST INFO 2] Sample offset = {}, Detector offset = {}".format(sample_offset, detector_offset)) # Both sample and detector shall move toward souce (-Y direction) with (74.21 - 71.) = 3.21 mm assert sample_offset == pytest.approx(-0.00321, 1e-12) @@ -214,7 +214,7 @@ def test_load_biosans_overwrite_swd(datarepo_dir, clean_workspace): sample_offset, detector_offset = get_sample_detector_offset( ws, "CG3:CS:SampleToSi", 71.0 * 1e-3, overwrite_sample_si_distance=0.07421 ) - print("[TEST INFO] Sample offset = {}, Detector offset = {}" "".format(sample_offset, detector_offset)) + print("[TEST INFO] Sample offset = {}, Detector offset = {}".format(sample_offset, detector_offset)) # Move sample and detector ws = move_instrument( @@ -268,7 +268,7 @@ def test_load_biosans_overwrite_sdd(datarepo_dir, clean_workspace): print("[TEST INFO] SampleToSi = {} mm".format(logs.find_log_with_units("CG3:CS:SampleToSi", unit="mm"))) raw_sample_det_distance = sample_detector_distance(ws) print( - "[TEST INFO] Sample to detector distance = {} /{} meter" "".format( + "[TEST INFO] Sample to detector distance = {} /{} meter".format( raw_sample_det_distance, sample_detector_distance(ws, log_key="sample_detector_distance", search_logs=True), ) @@ -278,7 +278,7 @@ def test_load_biosans_overwrite_sdd(datarepo_dir, clean_workspace): sample_offset, detector_offset = get_sample_detector_offset( ws, "CG3:CS:SampleToSi", 71.0 * 1e-3, overwrite_sample_detector_distance=7.1234 ) - print("[TEST INFO] Sample offset = {}, Detector offset = {}" "".format(sample_offset, detector_offset)) + print("[TEST INFO] Sample offset = {}, Detector offset = {}".format(sample_offset, detector_offset)) # Move sample and detector ws = move_instrument( diff --git a/tests/integration/drtsans/test_auto_wedge.py b/tests/integration/drtsans/test_auto_wedge.py index 05e520196..a5f9eaea6 100644 --- a/tests/integration/drtsans/test_auto_wedge.py +++ b/tests/integration/drtsans/test_auto_wedge.py @@ -537,15 +537,15 @@ def test_integration(): # verify background # first background - the extra 360 is to get around the circle - assert 0.5 * (back_wedge[0][0] + back_wedge[0][1] + 360) == pytest.approx( - 272.0, abs=1.2 - ), "First background center is at 270." + assert 0.5 * (back_wedge[0][0] + back_wedge[0][1] + 360) == pytest.approx(272.0, abs=1.2), ( + "First background center is at 270." + ) assert back_wedge[0][0] == pytest.approx(255.0, abs=0.5) assert back_wedge[0][1] == pytest.approx(-69.0, abs=0.5) # second background - assert 0.5 * (back_wedge[1][0] + back_wedge[1][1]) == pytest.approx( - 90.0, abs=5.0 - ), "Second background center is at 90." + assert 0.5 * (back_wedge[1][0] + back_wedge[1][1]) == pytest.approx(90.0, abs=5.0), ( + "Second background center is at 90." 
+ ) assert back_wedge[1][0] == pytest.approx(76.0, abs=0.5) assert back_wedge[1][1] == pytest.approx(111.0, abs=0.5) diff --git a/tests/integration/drtsans/tof/eqsans/test_reduction_api.py b/tests/integration/drtsans/tof/eqsans/test_reduction_api.py index 92dde5b07..4d5aa28c6 100644 --- a/tests/integration/drtsans/tof/eqsans/test_reduction_api.py +++ b/tests/integration/drtsans/tof/eqsans/test_reduction_api.py @@ -396,7 +396,7 @@ def export_iq_comparison(iq1d_tuple_list: List[Tuple[str, IQmod, str]], png_name plt.legend() # save - plt.savefig(f'{png_name.split(".")[0]}_error_bar.png') + plt.savefig(f"{png_name.split('.')[0]}_error_bar.png") # close plt.close() @@ -446,9 +446,7 @@ def test_wavelength_step(has_sns_mount, reference_dir): "sampleApertureSize": "10", "fluxMonitorRatioFile": None, "sensitivityFileName": ( - "/SNS/EQSANS/shared/NeXusFiles/" - "EQSANS/2020A_mp/" - "Sensitivity_patched_thinPMMA_2o5m_113514_mantid.nxs" + "/SNS/EQSANS/shared/NeXusFiles/EQSANS/2020A_mp/Sensitivity_patched_thinPMMA_2o5m_113514_mantid.nxs" ), "numQBins": 100, "WedgeMinAngles": "-30, 60", @@ -615,15 +613,15 @@ def verify_processed_workspace( r = CompareWorkspaces(Workspace1=gold_ws, Workspace2=test_ws) print(f"[INT-TEST] Verify reduced workspace {test_ws} match expected/gold {gold_ws}: {r}") if r != "Success": - assert ( - gold_ws.getNumberHistograms() == test_ws.getNumberHistograms() - ), f"Histograms: {gold_ws.getNumberHistograms()} != {test_ws.getNumberHistograms()}" - assert ( - gold_ws.readY(0).shape == test_ws.readY(0).shape - ), f"Number of wavelength: {gold_ws.readY(0).shape} != {test_ws.readY(0).shape}" - assert ( - gold_ws.readX(0).shape == test_ws.readX(0).shape - ), f"Histogram or point data: {gold_ws.readX(0).shape} != {test_ws.readX(0).shape}" + assert gold_ws.getNumberHistograms() == test_ws.getNumberHistograms(), ( + f"Histograms: {gold_ws.getNumberHistograms()} != {test_ws.getNumberHistograms()}" + ) + assert gold_ws.readY(0).shape == test_ws.readY(0).shape, ( + f"Number of wavelength: {gold_ws.readY(0).shape} != {test_ws.readY(0).shape}" + ) + assert gold_ws.readX(0).shape == test_ws.readX(0).shape, ( + f"Histogram or point data: {gold_ws.readX(0).shape} != {test_ws.readX(0).shape}" + ) gold_x_array = gold_ws.extractX() test_x_array = test_ws.extractX() assert gold_x_array.shape == test_x_array.shape, "Q bins sizes are different" diff --git a/tests/unit/drtsans/i_of_q_binning_tests_data.py b/tests/unit/drtsans/i_of_q_binning_tests_data.py index 2f899e5f7..16c26442b 100644 --- a/tests/unit/drtsans/i_of_q_binning_tests_data.py +++ b/tests/unit/drtsans/i_of_q_binning_tests_data.py @@ -282,7 +282,7 @@ def generate_test_data_wavelength(q_dimension, num_wavelengths): (intensity, sigma, q, dq, wavelength) or (intensity, sigma, qx, dqx, qy, dqy, wavelength) """ - assert isinstance(num_wavelengths, int) and num_wavelengths > 0, "Number of wavelength must be greater" "than 0." + assert isinstance(num_wavelengths, int) and num_wavelengths > 0, "Number of wavelength must be greater than 0."
if q_dimension == 1: # get initial 1D arrays diff --git a/tests/unit/drtsans/test_event_nexus_nodes.py b/tests/unit/drtsans/test_event_nexus_nodes.py index 526e6d3e2..3bd8b5a6e 100644 --- a/tests/unit/drtsans/test_event_nexus_nodes.py +++ b/tests/unit/drtsans/test_event_nexus_nodes.py @@ -173,7 +173,7 @@ def test_create_instrument_node(datarepo_dir): # attributes # cannot get b'NXinstrument' - assert source_instrument.attributes == test_node.attributes, "{} shall be same as {}" "".format( + assert source_instrument.attributes == test_node.attributes, "{} shall be same as {}".format( source_instrument.attributes, test_node.attributes ) diff --git a/tests/unit/drtsans/test_i_of_q_1d_binning.py b/tests/unit/drtsans/test_i_of_q_1d_binning.py index 0a3ea22f4..79d48e67a 100644 --- a/tests/unit/drtsans/test_i_of_q_1d_binning.py +++ b/tests/unit/drtsans/test_i_of_q_1d_binning.py @@ -64,19 +64,19 @@ def test_1d_bin_linear_no_wt(): # Calculate and verify # I(0.0035) = 68.92857: drtsans: 68.92857142857143 # verify Q[3] - assert abs(binned_iq.mod_q[3] - 0.0035) < 1e-6, "Q[3] {} shall be {} +/- 1e-6" "".format( + assert abs(binned_iq.mod_q[3] - 0.0035) < 1e-6, "Q[3] {} shall be {} +/- 1e-6".format( binned_iq.delta_mod_q[3], 0.0035 ) # verify I[3] - assert abs(binned_iq.intensity[3] - 68.92857) < 1e-5, "Intensity[3] shall be 68.92857 but not {}" "".format( + assert abs(binned_iq.intensity[3] - 68.92857) < 1e-5, "Intensity[3] shall be 68.92857 but not {}".format( binned_iq.intensity[3] ) # verify sigmaI[3] = 2.218889: assert abs(binned_iq.error[3] - 2.218889) < 1e-6, "error" # verify sigma_Q[3] = 1.154E-02 - assert binned_iq.delta_mod_q[3] == pytest.approx( - 1.154e-02, abs=2.0e-5 - ), "Linear binning: Q resolution {} does not match expected {}".format(binned_iq.delta_mod_q[3], 1.135e-02) + assert binned_iq.delta_mod_q[3] == pytest.approx(1.154e-02, abs=2.0e-5), ( + "Linear binning: Q resolution {} does not match expected {}".format(binned_iq.delta_mod_q[3], 1.135e-02) + ) def test_1d_bin_log_no_wt(): @@ -114,9 +114,9 @@ def test_1d_bin_log_no_wt(): # dI(0.0022) = 5.9160797831 assert binned_iq.error[4] == pytest.approx(3.51978534699048, abs=1.0e-12), "error" # sigma_Q(0.0022) = 1.135E-02 - assert binned_iq.delta_mod_q[4] == pytest.approx( - 1.154e-2, abs=2.0e-5 - ), "Log binning: Q resolution {} does not match expected {}".format(binned_iq.delta_mod_q[3], 1.135e-02) + assert binned_iq.delta_mod_q[4] == pytest.approx(1.154e-2, abs=2.0e-5), ( + "Log binning: Q resolution {} does not match expected {}".format(binned_iq.delta_mod_q[3], 1.135e-02) + ) def test_1d_bin_linear_no_wt_no_wl(): @@ -153,11 +153,11 @@ def test_1d_bin_linear_no_wt_no_wl(): # I(0.0035) = 68.92857: drtsans: 68.92857142857143 # verify Q[3] - assert abs(binned_iq_wl.mod_q[3] - 0.0035) < 1e-6, "Q[3] {} shall be {} +/- 1e-6" "".format( + assert abs(binned_iq_wl.mod_q[3] - 0.0035) < 1e-6, "Q[3] {} shall be {} +/- 1e-6".format( binned_iq_wl.delta_mod_q[3], 0.0035 ) assert abs(binned_iq_no_wl.mod_q[3] - 0.0035) < 1e-6, ( - f"Q[3] {binned_iq_wl.delta_mod_q[3]} shall be " f"{0.0035} +/- 1e-6" + f"Q[3] {binned_iq_wl.delta_mod_q[3]} shall be {0.0035} +/- 1e-6" ) # verify wavelength @@ -165,24 +165,24 @@ def test_1d_bin_linear_no_wt_no_wl(): assert binned_iq_wl.wavelength[3] == pytest.approx(1.5, 1e-5) # verify I[3] - assert abs(binned_iq_wl.intensity[3] - 68.92857) < 1e-5, "Intensity[3] shall be 68.92857 but not {}" "".format( + assert abs(binned_iq_wl.intensity[3] - 68.92857) < 1e-5, "Intensity[3] shall be 68.92857 but not {}".format( 
binned_iq_wl.intensity[3] ) - assert binned_iq_wl.intensity[3 + num_base_bins] == pytest.approx( - 68.92857 * 2, 1e-6 - ), f"diff = {binned_iq_wl.intensity[3 + num_base_bins] - 68.92857 * 2}" + assert binned_iq_wl.intensity[3 + num_base_bins] == pytest.approx(68.92857 * 2, 1e-6), ( + f"diff = {binned_iq_wl.intensity[3 + num_base_bins] - 68.92857 * 2}" + ) # 3 wavelengths, 3 times of sample points, 6 times of total intensity (simple sum). # thus the binned intensity is increased by 6/3 = 2 times - assert binned_iq_wl.intensity[3] * 2.0 == pytest.approx( - binned_iq_no_wl.intensity[3], 1e-6 - ), f"diff = {binned_iq_wl.intensity[3] * 6. - binned_iq_no_wl.intensity[3]}" + assert binned_iq_wl.intensity[3] * 2.0 == pytest.approx(binned_iq_no_wl.intensity[3], 1e-6), ( + f"diff = {binned_iq_wl.intensity[3] * 6.0 - binned_iq_no_wl.intensity[3]}" + ) # verify sigmaI[3] = 2.218889: assert abs(binned_iq_wl.error[3] - 2.218889) < 1e-6, "error" # verify sigma_Q[3] = 1.154E-02 - assert binned_iq_wl.delta_mod_q[3] == pytest.approx( - 1.154e-02, abs=2.0e-5 - ), "Linear binning: Q resolution {} does not match expected {}".format(binned_iq_wl.delta_mod_q[3], 1.135e-02) + assert binned_iq_wl.delta_mod_q[3] == pytest.approx(1.154e-02, abs=2.0e-5), ( + "Linear binning: Q resolution {} does not match expected {}".format(binned_iq_wl.delta_mod_q[3], 1.135e-02) + ) def test_1d_bin_wavelength(): diff --git a/tests/unit/drtsans/test_i_of_q_2d_binning.py b/tests/unit/drtsans/test_i_of_q_2d_binning.py index ced1791a2..140eeda66 100644 --- a/tests/unit/drtsans/test_i_of_q_2d_binning.py +++ b/tests/unit/drtsans/test_i_of_q_2d_binning.py @@ -68,31 +68,31 @@ def test_2d_bin_no_sub_no_wt(): # verify dQx and dQy assert binned_iq_2d.delta_qx[1][1] == pytest.approx(0.00816, abs=1e-5), ( - "dQx {} is incorrect comparing to {}." "".format(binned_iq_2d[2][1][1], 0.00816) + "dQx {} is incorrect comparing to {}.".format(binned_iq_2d[2][1][1], 0.00816) ) assert binned_iq_2d.delta_qy[1][1] == pytest.approx(0.00816, abs=1e-5), ( - "dQy {}is incorrect comparing to {}." 
"".format(binned_iq_2d[3][1][1], 0.00816) + "dQy {}is incorrect comparing to {}.".format(binned_iq_2d[3][1][1], 0.00816) ) # verify Qx and Qy on off diagonal values # Qx in row 0 shall be all same as qx bin center [1] - assert binned_iq_2d.qx[0][1] == pytest.approx( - qx_bins.centers[0], abs=1e-5 - ), "Qx[0, 1] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[0][1], qx_bins.centers[0]) + assert binned_iq_2d.qx[0][1] == pytest.approx(qx_bins.centers[0], abs=1e-5), ( + "Qx[0, 1] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[0][1], qx_bins.centers[0]) + ) # Qx in row 1 shall be all same as qx bin center [0] - assert binned_iq_2d.qx[1][0] == pytest.approx( - qx_bins.centers[1], abs=1e-5 - ), "Qx[1, 0] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[1][0], qx_bins.centers[1]) + assert binned_iq_2d.qx[1][0] == pytest.approx(qx_bins.centers[1], abs=1e-5), ( + "Qx[1, 0] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[1][0], qx_bins.centers[1]) + ) # Qy in col 0 shall be all same as qy bin center [0] - assert binned_iq_2d.qy[1][0] == pytest.approx( - qy_bins.centers[0], abs=1e-5 - ), "Qy[1, 0] {} shall be same as Qy bin center [0] {}".format(binned_iq_2d.qy[1][0], qy_bins.centers[0]) + assert binned_iq_2d.qy[1][0] == pytest.approx(qy_bins.centers[0], abs=1e-5), ( + "Qy[1, 0] {} shall be same as Qy bin center [0] {}".format(binned_iq_2d.qy[1][0], qy_bins.centers[0]) + ) # Qy in col 1 shall be all same as qy bin center [1] - assert binned_iq_2d.qy[0][1] == pytest.approx( - qy_bins.centers[1], abs=1e-5 - ), "Qy[0, 1] {} shall be same as Qy bin center [1] {}".format(binned_iq_2d.qx[0][1], qy_bins.centers[1]) + assert binned_iq_2d.qy[0][1] == pytest.approx(qy_bins.centers[1], abs=1e-5), ( + "Qy[0, 1] {} shall be same as Qy bin center [1] {}".format(binned_iq_2d.qx[0][1], qy_bins.centers[1]) + ) def test_2d_bin_no_sub_no_wt_wavelength(): @@ -160,30 +160,30 @@ def test_2d_bin_no_sub_no_wt_wavelength(): # verify dQx and dQy assert binned_iq_2d.delta_qx[1][1] == pytest.approx(0.00816, abs=1e-5), ( - "dQx {} is incorrect comparing to {}." "".format(binned_iq_2d[2][1][1], 0.00816) + "dQx {} is incorrect comparing to {}.".format(binned_iq_2d[2][1][1], 0.00816) ) assert binned_iq_2d.delta_qy[1][1] == pytest.approx(0.00816, abs=1e-5), ( - "dQy {}is incorrect comparing to {}." 
"".format(binned_iq_2d[3][1][1], 0.00816) + "dQy {}is incorrect comparing to {}.".format(binned_iq_2d[3][1][1], 0.00816) ) # verify Qx and Qy on off diagonal values # Qx in row 0 shall be all same as qx bin center [1] - assert binned_iq_2d.qx[0][1] == pytest.approx( - qx_bins.centers[0], abs=1e-5 - ), "Qx[0, 1] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[0][1], qx_bins.centers[0]) + assert binned_iq_2d.qx[0][1] == pytest.approx(qx_bins.centers[0], abs=1e-5), ( + "Qx[0, 1] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[0][1], qx_bins.centers[0]) + ) # Qx in row 1 shall be all same as qx bin center [0] - assert binned_iq_2d.qx[1][0] == pytest.approx( - qx_bins.centers[1], abs=1e-5 - ), "Qx[1, 0] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[1][0], qx_bins.centers[1]) + assert binned_iq_2d.qx[1][0] == pytest.approx(qx_bins.centers[1], abs=1e-5), ( + "Qx[1, 0] {} shall be same as Qx bin center [1] {}".format(binned_iq_2d.qx[1][0], qx_bins.centers[1]) + ) # Qy in col 0 shall be all same as qy bin center [0] - assert binned_iq_2d.qy[1][0] == pytest.approx( - qy_bins.centers[0], abs=1e-5 - ), "Qy[1, 0] {} shall be same as Qy bin center [0] {}".format(binned_iq_2d.qy[1][0], qy_bins.centers[0]) + assert binned_iq_2d.qy[1][0] == pytest.approx(qy_bins.centers[0], abs=1e-5), ( + "Qy[1, 0] {} shall be same as Qy bin center [0] {}".format(binned_iq_2d.qy[1][0], qy_bins.centers[0]) + ) # Qy in col 1 shall be all same as qy bin center [1] - assert binned_iq_2d.qy[0][1] == pytest.approx( - qy_bins.centers[1], abs=1e-5 - ), "Qy[0, 1] {} shall be same as Qy bin center [1] {}".format(binned_iq_2d.qx[0][1], qy_bins.centers[1]) + assert binned_iq_2d.qy[0][1] == pytest.approx(qy_bins.centers[1], abs=1e-5), ( + "Qy[0, 1] {} shall be same as Qy bin center [1] {}".format(binned_iq_2d.qx[0][1], qy_bins.centers[1]) + ) def test_2d_bin_no_sub_wt(): @@ -223,9 +223,9 @@ def test_2d_bin_no_sub_wt(): # verify I(-0.003254,-0.001713) and sigma(-0.003254,-0.001713) assert binned_iq_2d.intensity[1][1] == pytest.approx(56.8660, abs=1e-4), "Weighted-binned I(Qx, Qy) is incorrect" - assert binned_iq_2d.error[1][1] == pytest.approx( - 4.353773265, abs=1e-8 - ), "Weighted-binned sigma I(Qx, Qy) is incorrect" + assert binned_iq_2d.error[1][1] == pytest.approx(4.353773265, abs=1e-8), ( + "Weighted-binned sigma I(Qx, Qy) is incorrect" + ) # verify dQx and dQy assert binned_iq_2d.delta_qx[1][1] == pytest.approx(0.00815, abs=1e-5), "dQx is incorrect" diff --git a/tests/unit/drtsans/test_i_of_q_annular_binning.py b/tests/unit/drtsans/test_i_of_q_annular_binning.py index d8895d116..107f49d87 100644 --- a/tests/unit/drtsans/test_i_of_q_annular_binning.py +++ b/tests/unit/drtsans/test_i_of_q_annular_binning.py @@ -59,7 +59,7 @@ def test_1d_annular_no_wt(): assert binned_iq.intensity[1] == pytest.approx(63.66666667, abs=1e-8), "Binned intensity is wrong" assert binned_iq.error[1] == pytest.approx(3.257470048, abs=1e-8), "Binned sigma I is wrong" assert binned_iq.delta_mod_q[1] == pytest.approx(1.154e-02, abs=1e-5), ( - "Binned Q resolution {} " "is incorrect comparing to {}." 
"".format(binned_iq.delta_mod_q[1], 0.01154) + "Binned Q resolution {} is incorrect comparing to {}.".format(binned_iq.delta_mod_q[1], 0.01154) ) # this is actually theta np.testing.assert_almost_equal(binned_iq.mod_q, np.linspace(start=18, stop=theta_max - 18, num=num_bins)) diff --git a/tests/unit/drtsans/test_i_of_q_wedge_binning.py b/tests/unit/drtsans/test_i_of_q_wedge_binning.py index 4dbd59352..69efdb762 100644 --- a/tests/unit/drtsans/test_i_of_q_wedge_binning.py +++ b/tests/unit/drtsans/test_i_of_q_wedge_binning.py @@ -67,7 +67,7 @@ def test_1d_bin_log_wedge_no_wt(): assert binned_iq.intensity[7] == pytest.approx(70.0, abs=1e-10) assert binned_iq.error[7] == pytest.approx(3.741657387, abs=1e-5) assert binned_iq.delta_mod_q[7] == pytest.approx(0.011460, abs=1e-4), ( - "Q resolution (Q[7] = {}) is " "incorrect comparing to {}." "".format(binned_iq.delta_mod_q[7], 0.0115) + "Q resolution (Q[7] = {}) is incorrect comparing to {}.".format(binned_iq.delta_mod_q[7], 0.0115) ) diff --git a/tests/unit/drtsans/test_plotting.py b/tests/unit/drtsans/test_plotting.py index 912b3f1c9..130c58268 100644 --- a/tests/unit/drtsans/test_plotting.py +++ b/tests/unit/drtsans/test_plotting.py @@ -30,7 +30,7 @@ def verify_images(test_png: str, gold_png): np.testing.assert_allclose( tested_image, gold_image, - err_msg=f"Testing result {tested_image} does not match " f"the expected result {gold_image}", + err_msg=f"Testing result {tested_image} does not match the expected result {gold_image}", ) @@ -146,7 +146,7 @@ def test_iq2d_data() -> Tuple[Any, Any, Any, Any]: intensity[1, 59] = 8.0 assert intensity.shape == (40, 60), ( - f"Expected intensity is 40 row (Qy) and 60 column (Qx) " f"but not {intensity.shape}" + f"Expected intensity is 40 row (Qy) and 60 column (Qx) but not {intensity.shape}" ) return mesh_x, mesh_y, intensity, error diff --git a/tests/unit/drtsans/test_savereductionlog.py b/tests/unit/drtsans/test_savereductionlog.py index 9d8d0787d..910379715 100644 --- a/tests/unit/drtsans/test_savereductionlog.py +++ b/tests/unit/drtsans/test_savereductionlog.py @@ -485,8 +485,8 @@ def test_reduction_parameters(cleanfile): try: assert _strValue(reduction_information_entry["drtsans"], "version") == drtsans_version except AttributeError as att_err: - info = f'h5py version = {h5py.__version__}: type: {type(reduction_information_entry["drtsans"])}' - info += f'\nmethods: {dir(reduction_information_entry["drtsans"])}' + info = f"h5py version = {h5py.__version__}: type: {type(reduction_information_entry['drtsans'])}" + info += f"\nmethods: {dir(reduction_information_entry['drtsans'])}" info += f"\nError: {att_err}" raise AttributeError(info) assert _strValue(reduction_information_entry["mantid"], "version") == mantid_version From 186817a876d2160d075be943be2a6a6975174ae3 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Mon, 10 Feb 2025 14:50:28 -0500 Subject: [PATCH 19/23] minor formatting in test_transmission --- tests/unit/drtsans/tof/eqsans/test_transmission.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/unit/drtsans/tof/eqsans/test_transmission.py b/tests/unit/drtsans/tof/eqsans/test_transmission.py index 3c34375b9..5dab8d2a4 100644 --- a/tests/unit/drtsans/tof/eqsans/test_transmission.py +++ b/tests/unit/drtsans/tof/eqsans/test_transmission.py @@ -82,8 +82,8 @@ def test_fit_raw(trasmission_data, clean_workspace): """ # Non-skip mode fitting_results = fit_raw_transmission(trasmission_data.raw, output_workspace=mtd.unique_hidden_name()) - 
(clean_workspace(fitting_results.transmission),) - (clean_workspace(fitting_results.lead_transmission),) + clean_workspace(fitting_results.transmission) + clean_workspace(fitting_results.lead_transmission) clean_workspace(fitting_results.lead_mantid_fit.OutputWorkspace) clean_workspace(fitting_results.lead_mantid_fit.OutputNormalisedCovarianceMatrix) clean_workspace(fitting_results.lead_mantid_fit.OutputParameters) @@ -91,8 +91,8 @@ def test_fit_raw(trasmission_data, clean_workspace): # Frame-skipping mode fitting_results = fit_raw_transmission(trasmission_data.raw_skip, output_workspace=mtd.unique_hidden_name()) - (clean_workspace(fitting_results.transmission),) - (clean_workspace(fitting_results.lead_transmission),) + clean_workspace(fitting_results.transmission) + clean_workspace(fitting_results.lead_transmission) clean_workspace(fitting_results.lead_mantid_fit.OutputWorkspace) clean_workspace(fitting_results.lead_mantid_fit.OutputNormalisedCovarianceMatrix) clean_workspace(fitting_results.lead_mantid_fit.OutputParameters) From 1bf11f29045a0ba155e0e85c43bf0b9b9c8d8385 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Mon, 10 Feb 2025 14:54:37 -0500 Subject: [PATCH 20/23] minor formatting in test_simulated_events --- .../drtsans/tof/eqsans/test_simulated_events.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/integration/drtsans/tof/eqsans/test_simulated_events.py b/tests/integration/drtsans/tof/eqsans/test_simulated_events.py index 36c6aab8c..232ec3e86 100644 --- a/tests/integration/drtsans/tof/eqsans/test_simulated_events.py +++ b/tests/integration/drtsans/tof/eqsans/test_simulated_events.py @@ -717,8 +717,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) - (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) + sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms") + sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms") sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") @@ -809,8 +809,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) - (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) + sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms") + sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms") sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") @@ -915,8 +915,8 @@ def _mock_transform_to_wavelength(*args, **kwargs): # log time-of-flight relevant info (needed for dark-current correction) low_tof_clip, high_tof_clip = kwargs["low_tof_clip"], kwargs["high_tof_clip"] pulse_period = metadata["pulse_period"] - (sample_logs.insert("low_tof_clip", low_tof_clip, unit="ms"),) - (sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms"),) + sample_logs.insert("low_tof_clip", low_tof_clip, 
unit="ms") + sample_logs.insert("high_tof_clip", high_tof_clip, unit="ms") sample_logs.insert("tof_frame_width", pulse_period, unit="ms") tof_width_clipped = pulse_period - low_tof_clip - high_tof_clip sample_logs.insert("tof_frame_width_clipped", tof_width_clipped, unit="ms") From 6430e56b22b1f1482094176d489016863362e582 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Tue, 11 Feb 2025 11:02:25 -0500 Subject: [PATCH 21/23] update urls to point to github --- .github/pull_request_template.md | 4 ++-- OnboardingChecklist.rst | 6 +++--- README_developer.rst | 6 +++--- docs/drtsans/reduction_scripts.rst | 6 +++--- docs/user/reduction_output.rst | 2 +- notebooks/barscan/barscan.ipynb | 2 +- src/drtsans/dark_current.py | 4 ++-- src/drtsans/iq.py | 4 ++-- src/drtsans/momentum_transfer.py | 6 +++--- src/drtsans/mono/dark_current.py | 6 +++--- src/drtsans/mono/gpsans/attenuation.py | 4 ++-- src/drtsans/mono/momentum_transfer.py | 6 +++--- src/drtsans/mono/normalization.py | 2 +- src/drtsans/pixel_calibration.py | 6 +++--- src/drtsans/prepare_sensivities_correction.py | 20 +++++++++---------- src/drtsans/sensitivity_correction_patch.py | 2 +- src/drtsans/stitch.py | 2 +- src/drtsans/tof/eqsans/dark_current.py | 8 ++++---- src/drtsans/tof/eqsans/momentum_transfer.py | 12 +++++------ src/drtsans/tof/eqsans/normalization.py | 6 +++--- src/drtsans/tof/eqsans/transmission.py | 6 +++--- src/drtsans/transmission.py | 4 ++-- .../drtsans/mono/test_absolute_units.py | 8 ++++---- .../drtsans/mono/test_dark_current.py | 8 ++++---- .../drtsans/mono/test_normalization.py | 12 +++++------ .../drtsans/mono/test_transmission.py | 2 +- .../drtsans/test_absolute_units.py | 4 ++-- .../drtsans/test_momentum_transfer.py | 4 ++-- .../drtsans/test_pixel_calibration.py | 2 +- tests/integration/drtsans/test_stitch.py | 8 ++++---- .../drtsans/test_subtract_background.py | 2 +- .../drtsans/test_tubecollection.py | 2 +- .../drtsans/tof/eqsans/test_dark_current.py | 8 ++++---- .../drtsans/tof/eqsans/test_normalization.py | 16 +++++++-------- .../drtsans/tof/eqsans/test_transmission.py | 14 ++++++------- .../drtsans/mono/gpsans/test_attenuation.py | 4 ++-- tests/unit/drtsans/mono/test_dark_current.py | 6 +++--- .../drtsans/mono/test_momentum_transfer.py | 4 ++-- tests/unit/drtsans/mono/test_normalization.py | 4 ++-- tests/unit/drtsans/test_dark_current.py | 2 +- tests/unit/drtsans/test_i_of_q_1d_binning.py | 2 +- tests/unit/drtsans/test_i_of_q_2d_binning.py | 2 +- .../drtsans/test_i_of_q_annular_binning.py | 2 +- .../unit/drtsans/test_i_of_q_wedge_binning.py | 2 +- .../test_i_q_azimuthal_to_i_q_modulo.py | 2 +- tests/unit/drtsans/test_momentum_transfer.py | 6 +++--- tests/unit/drtsans/test_sensitivity.py | 2 +- ...sensitivity_correction_moving_detectors.py | 2 +- .../drtsans/test_thickness_normalization.py | 2 +- .../drtsans/test_transmission_correction.py | 2 +- tests/unit/drtsans/test_tubecollection.py | 2 +- .../eqsans/test_convert_tof_to_wavelength.py | 4 ++-- .../drtsans/tof/eqsans/test_dark_current.py | 4 ++-- .../drtsans/tof/eqsans/test_normalization.py | 8 ++++---- .../drtsans/tof/eqsans/test_resolution.py | 2 +- .../drtsans/tof/eqsans/test_transmission.py | 6 +++--- 56 files changed, 142 insertions(+), 142 deletions(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index c2a72667b..bf882e192 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,7 +1,7 @@ ## Description of work: Check all that apply: -- [ ] added [release 
notes](https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/-/blob/next/docs/release_notes.rst?ref_type=heads) (if not, provide an explanation in the work description) +- [ ] added [release notes](https://github.com/neutrons/drtsans/blob/next/docs/release_notes.rst) (if not, provide an explanation in the work description) - [ ] updated documentation - [ ] Source added/refactored - [ ] Added unit tests @@ -16,7 +16,7 @@ Check all that apply: ## Check list for the reviewer -- [ ] [release notes](https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/-/blob/next/docs/release_notes.rst?ref_type=heads) updated, or an explanation is provided as to why release notes are unnecessary +- [ ] [release notes](https://github.com/neutrons/drtsans/blob/next/docs/release_notes.rst) updated, or an explanation is provided as to why release notes are unnecessary - [ ] best software practices + [ ] clearly named variables (better to be verbose in variable names) + [ ] code comments explaining the intent of code blocks diff --git a/OnboardingChecklist.rst b/OnboardingChecklist.rst index 4bb79ef59..bfda86276 100644 --- a/OnboardingChecklist.rst +++ b/OnboardingChecklist.rst @@ -86,7 +86,7 @@ or Development procedure ##################### -How to develop codes in drtSANS shall follow the instruction in `CONTRIBUTION `_. +How to develop codes in drtSANS shall follow the instruction in `CONTRIBUTION `_. .. 1. A developer is assigned with a task during neutron status meeting and changes the task's status to **In Progress**. @@ -113,8 +113,8 @@ Test Driven Development (TDD) Examples: - * `drtsans/resolution.py `_ and `tests/unit/new/drtsans/test_resolution.py `_. - * `drtsans/tof/eqsans/incoherence_correction_1d.py `_ and `tests/unit/new/drtsans/tof/eqsans/test_incoherence_correction_1d.py `_. + * `drtsans/resolution.py `_ and `tests/unit/drtsans/test_resolution.py `_. + * `drtsans/tof/eqsans/incoherence_correction_1d.py `_ and `tests/unit/drtsans/tof/eqsans/test_incoherence_correction_1d.py `_. * Integration test diff --git a/README_developer.rst b/README_developer.rst index 2108156d2..eb7efccac 100644 --- a/README_developer.rst +++ b/README_developer.rst @@ -200,13 +200,13 @@ Test Driven Development (TDD) * Unit test All methods and modules shall have unit tests implemented. - Unit tests are located in `repo/tests/unit/new `_. + Unit tests are located in `repo/tests/unit/new `_. A unit test shall be created in the corresponding directory to the method or module that it tests against. Examples: - * `drtsans/resolution.py `_ and `tests/unit/new/drtsans/test_resolution.py `_. - * `drtsans/tof/eqsans/incoherence_correction_1d.py `_ and `tests/unit/new/drtsans/tof/eqsans/test_incoherence_correction_1d.py `_. + * `drtsans/resolution.py `_ and `tests/unit/drtsans/test_resolution.py `_. + * `drtsans/tof/eqsans/incoherence_correction_1d.py `_ and `tests/unit/drtsans/tof/eqsans/test_incoherence_correction_1d.py `_. 
* Integration test diff --git a/docs/drtsans/reduction_scripts.rst b/docs/drtsans/reduction_scripts.rst index 463e116de..ca63ef2ab 100644 --- a/docs/drtsans/reduction_scripts.rst +++ b/docs/drtsans/reduction_scripts.rst @@ -7,9 +7,9 @@ Reduction Scripts The following python scripts can be used as the entry points for reduction of SANS data for each instrument -- `scripts/biosans_reduction.py `_ -- `scripts/eqsans_reduction.py `_ -- `scripts/gpsans_reduction.py `_ +- `scripts/biosans_reduction.py `_ +- `scripts/eqsans_reduction.py `_ +- `scripts/gpsans_reduction.py `_ These scripts receive as argument the path to a `*.json` file containing all necessary reduction parameters. In the active `sans` conda environment, and assuming we are at the root of the drtsans repository: diff --git a/docs/user/reduction_output.rst b/docs/user/reduction_output.rst index bdadd335b..d8e62e085 100644 --- a/docs/user/reduction_output.rst +++ b/docs/user/reduction_output.rst @@ -20,7 +20,7 @@ Customized reports could be generated form reduction hdf log files. The script ` generate_report /path/to/hdf/log/file.hdf [/path/to/my/generate_report.yaml] -The last parameter here is a path to a users' YAML file with report parameters. If not provided, the [default parameters](https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/-/blob/next/scripts/generate_report.yaml) will be used to create the report. +The last parameter here is a path to a users' YAML file with report parameters. If not provided, the [default parameters](https://github.com/neutrons/drtsans/blob/next/scripts/generate_report.yaml) will be used to create the report. The yaml file contains the keys to extract from the hdf log file and short aliases to be used in the report diff --git a/notebooks/barscan/barscan.ipynb b/notebooks/barscan/barscan.ipynb index 7f576fff8..51417b39a 100644 --- a/notebooks/barscan/barscan.ipynb +++ b/notebooks/barscan/barscan.ipynb @@ -100,7 +100,7 @@ "source": [ "

Plotting the calibration

\n", "\n", - "We can create a **2D intensity plot** for the pixel positions and heights using the `as_intensities` method of the calibration object (the calibration is a table of type [Table](https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fpixel_calibration.py#L90))\n", + "We can create a **2D intensity plot** for the pixel positions and heights using the `as_intensities` method of the calibration object (the calibration is a table of type [Table](https://github.com/neutrons/drtsans/blob/next/src/drtsans/pixel_calibration.py#L285))\n", "\n", "**Functions and Algorithms used:**\n", "- [plot_detector](http://docs.drt-sans.ornl.gov/drtsans/plots.html#drtsans.plots.plot_detector)" diff --git a/src/drtsans/dark_current.py b/src/drtsans/dark_current.py index 289cd0582..b1a58fa69 100644 --- a/src/drtsans/dark_current.py +++ b/src/drtsans/dark_current.py @@ -11,8 +11,8 @@ r""" Hyperlinks to drtsans functions -namedtuplefy -SampleLogs +namedtuplefy +SampleLogs """ # noqa: E501 from drtsans.settings import namedtuplefy from drtsans.samplelogs import SampleLogs diff --git a/src/drtsans/iq.py b/src/drtsans/iq.py index edca6e3f3..fb8a4f056 100644 --- a/src/drtsans/iq.py +++ b/src/drtsans/iq.py @@ -1,4 +1,4 @@ -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/dataobjects.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/dataobjects.py # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/docs/drtsans/dataobjects.rst import numpy @@ -14,7 +14,7 @@ from typing import List, Any, Tuple import numpy as np -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/determine_bins.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/determine_bins.py from drtsans.determine_bins import ( determine_1d_log_bins, determine_1d_linear_bins, diff --git a/src/drtsans/momentum_transfer.py b/src/drtsans/momentum_transfer.py index e54da3797..b551409b9 100644 --- a/src/drtsans/momentum_transfer.py +++ b/src/drtsans/momentum_transfer.py @@ -4,9 +4,9 @@ r""" Hyperlinks to drtsans functions -IQazimuthal, IQcrystal, IQmod -pixel_centers -namedtuplefy, unpack_v3d +IQazimuthal, IQcrystal, IQmod +pixel_centers +namedtuplefy, unpack_v3d """ # noqa: E501 from drtsans.dataobjects import IQazimuthal, IQcrystal, IQmod from drtsans.geometry import pixel_centers diff --git a/src/drtsans/mono/dark_current.py b/src/drtsans/mono/dark_current.py index e9862152d..92ef05d32 100644 --- a/src/drtsans/mono/dark_current.py +++ b/src/drtsans/mono/dark_current.py @@ -8,9 +8,9 @@ from mantid.simpleapi import Minus, mtd, DeleteWorkspace, Scale, Integration r""" links to drtsans imports -SampleLogs -duration -set_init_uncertainties +SampleLogs +duration +set_init_uncertainties """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans.dark_current import duration diff --git a/src/drtsans/mono/gpsans/attenuation.py b/src/drtsans/mono/gpsans/attenuation.py index ddb777fd3..6c0b967bf 100644 --- a/src/drtsans/mono/gpsans/attenuation.py +++ b/src/drtsans/mono/gpsans/attenuation.py @@ -1,6 +1,6 @@ import numpy as np -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/samplelogs.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/samplelogs.py from drtsans.samplelogs import SampleLogs # Functions exposed to the general user (public) API @@ -58,7 +58,7 @@ def attenuation_factor(input_workspace): # The fitted attenuator parameters for the equation A * exp(-B * λ) + C # Provided by Lisa Debeer-Schmitt, 
2020-02-26, original file # provided can be found at - # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/data/attenuator_fitting_parameters.txt + # https://github.com/neutrons/drtsans/blob/next/data/attenuator_fitting_parameters.txt # In the following format (Amp, Amp Err, exp const, exp const err, bkgd, bkgd err) attenuators = { 3: ( # x3 diff --git a/src/drtsans/mono/momentum_transfer.py b/src/drtsans/mono/momentum_transfer.py index 223e0f7a0..76f1c1c06 100644 --- a/src/drtsans/mono/momentum_transfer.py +++ b/src/drtsans/mono/momentum_transfer.py @@ -1,12 +1,12 @@ import numpy as np -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/momentum_transfer.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/momentum_transfer.py import drtsans.momentum_transfer -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/resolution.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/resolution.py import drtsans.resolution -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/geometry.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/geometry.py from drtsans.geometry import ( logged_smearing_pixel_size, sample_aperture_diameter, diff --git a/src/drtsans/mono/normalization.py b/src/drtsans/mono/normalization.py index 4edc935d8..18d6cf1a1 100644 --- a/src/drtsans/mono/normalization.py +++ b/src/drtsans/mono/normalization.py @@ -9,7 +9,7 @@ r""" Hyperlinks to drtsans functions -SampleLogs +SampleLogs """ # noqa: E501 from drtsans.samplelogs import SampleLogs diff --git a/src/drtsans/pixel_calibration.py b/src/drtsans/pixel_calibration.py index 79a6222ea..e329a171c 100644 --- a/src/drtsans/pixel_calibration.py +++ b/src/drtsans/pixel_calibration.py @@ -49,9 +49,9 @@ r""" Hyperlinks to drtsans functions -namedtuplefy -SampleLogs -TubeCollection +namedtuplefy +SampleLogs +TubeCollection """ # noqa: E501 from drtsans.instruments import ( InstrumentEnumName, diff --git a/src/drtsans/prepare_sensivities_correction.py b/src/drtsans/prepare_sensivities_correction.py index e5315abc2..a7be6b90c 100644 --- a/src/drtsans/prepare_sensivities_correction.py +++ b/src/drtsans/prepare_sensivities_correction.py @@ -47,9 +47,9 @@ # Using dictionary with instrument name as key is solution for it. 
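The comment above motivates keying module-level dictionaries by instrument name; the hunk that follows only rewrites their hyperlink comments. As a rough sketch of how such a mapping is consumed (the wrapper function and its arguments are assumed; only the PREPARE_DATA name and the CG2/CG3/EQSANS keys appear in the code below):

    # Illustrative sketch only -- not part of the patch. The instrument name
    # selects the instrument-specific implementation at call time; the wrapper
    # and its signature are hypothetical.
    def prepare_run(instrument, run, **reduction_options):
        prepare_data = PREPARE_DATA[instrument]  # e.g. drtsans.mono.gpsans.api.prepare_data for CG2
        return prepare_data(data=run, **reduction_options)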
# prepare data in .../api.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fmono%2Fgpsans%2Fapi.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fmono%2Fbiosans%2Fapi.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Ftof%2Feqsans%2Fapi.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fmono%2Fgpsans%2Fapi.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fmono%2Fbiosans%2Fapi.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Ftof%2Feqsans%2Fapi.py PREPARE_DATA = { CG2: drtsans.mono.gpsans.api.prepare_data, CG3: drtsans.mono.biosans.api.prepare_data, @@ -57,8 +57,8 @@ } # Find beam center in .../find_beam_center.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fmono%2Fbiosans%2Fbeam_finder.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fbeam_finder.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fmono%2Fbiosans%2Fbeam_finder.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fbeam_finder.py FIND_BEAM_CENTER = { CG2: drtsans.mono.gpsans.find_beam_center, CG3: drtsans.mono.biosans.find_beam_center, @@ -66,23 +66,23 @@ } # Center detector in -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fmono%2Fbiosans%2Fbeam_finder.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fbeam_finder.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fmono%2Fbiosans%2Fbeam_finder.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fbeam_finder.py CENTER_DETECTOR = { CG2: drtsans.mono.gpsans.center_detector, CG3: drtsans.mono.biosans.center_detector, EQSANS: drtsans.tof.eqsans.center_detector, } -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fsolid_angle.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fsolid_angle.py SOLID_ANGLE_CORRECTION = { CG2: drtsans.mono.gpsans.solid_angle_correction, CG3: drtsans.mono.biosans.solid_angle_correction, EQSANS: drtsans.solid_angle_correction, } -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fsensitivity_correction_moving_detectors.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fsensitivity_correction_patch.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fsensitivity_correction_moving_detectors.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fsensitivity_correction_patch.py CALCULATE_SENSITIVITY_CORRECTION = { MOVING_DETECTORS: calculate_sensitivity_correction_moving, PATCHING_DETECTORS: calculate_sensitivity_correction_patch, diff --git a/src/drtsans/sensitivity_correction_patch.py b/src/drtsans/sensitivity_correction_patch.py index 82214ee17..25cdab06e 100644 --- a/src/drtsans/sensitivity_correction_patch.py +++ b/src/drtsans/sensitivity_correction_patch.py @@ -17,7 +17,7 @@ CreateWorkspace, ) -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans%2Fdetector.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans%2Fdetector.py from drtsans.detector import Component diff --git a/src/drtsans/stitch.py b/src/drtsans/stitch.py index 1d309635e..83f2e443e 100644 --- a/src/drtsans/stitch.py +++ b/src/drtsans/stitch.py @@ -19,7 +19,7 @@ def stitch_profiles(profiles, overlaps, target_profile_index=0): **drtsans objects used**: ~drtsans.dataobjects.IQmod - + Parameters ---------- diff --git 
a/src/drtsans/tof/eqsans/dark_current.py b/src/drtsans/tof/eqsans/dark_current.py index e586111a1..c2f1d8f85 100644 --- a/src/drtsans/tof/eqsans/dark_current.py +++ b/src/drtsans/tof/eqsans/dark_current.py @@ -11,10 +11,10 @@ r""" Hyperlinks to drtsans functions -exists, registered_workspace -SampleLogs -clipped_bands_from_logs -duration, counts_in_detector +exists, registered_workspace +SampleLogs +clipped_bands_from_logs +duration, counts_in_detector """ # noqa: E501 from drtsans.path import exists, registered_workspace from drtsans.samplelogs import SampleLogs diff --git a/src/drtsans/tof/eqsans/momentum_transfer.py b/src/drtsans/tof/eqsans/momentum_transfer.py index 77ed76112..b6bcb19db 100644 --- a/src/drtsans/tof/eqsans/momentum_transfer.py +++ b/src/drtsans/tof/eqsans/momentum_transfer.py @@ -1,26 +1,26 @@ import numpy as np from collections import namedtuple -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/momentum_transfer.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/momentum_transfer.py import drtsans.momentum_transfer -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/resolution.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/resolution.py import drtsans.resolution -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/tof/eqsans/geometry.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/tof/eqsans/geometry.py from drtsans.tof.eqsans.geometry import ( sample_aperture_diameter, source_aperture_diameter, source_aperture_sample_distance, ) -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/geometry.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/geometry.py from drtsans import geometry as sans_geometry -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/samplelogs.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/samplelogs.py from drtsans.samplelogs import SampleLogs -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/dataobjects.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/dataobjects.py from drtsans.dataobjects import IQazimuthal, IQcrystal, IQmod, DataType from mantid.kernel import logger diff --git a/src/drtsans/tof/eqsans/normalization.py b/src/drtsans/tof/eqsans/normalization.py index 8c7c99be1..fac386090 100644 --- a/src/drtsans/tof/eqsans/normalization.py +++ b/src/drtsans/tof/eqsans/normalization.py @@ -33,9 +33,9 @@ r""" Hyperlinks to drtsans functions -SampleLogs -path -duration +SampleLogs +path +duration """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans import path diff --git a/src/drtsans/tof/eqsans/transmission.py b/src/drtsans/tof/eqsans/transmission.py index c30a8ff7a..5c3ca18cd 100644 --- a/src/drtsans/tof/eqsans/transmission.py +++ b/src/drtsans/tof/eqsans/transmission.py @@ -12,10 +12,10 @@ r""" Hyperlinks to drtsans functions -namedtuplefy -calculate_transmission +namedtuplefy +calculate_transmission clipped_bands_from_logs, transmitted_bands available at: - + """ # noqa: E501 from drtsans.settings import namedtuplefy from drtsans.transmission import calculate_transmission as calculate_raw_transmission diff --git a/src/drtsans/transmission.py b/src/drtsans/transmission.py index dbf1d1c82..fe2f8e84c 100644 --- a/src/drtsans/transmission.py +++ b/src/drtsans/transmission.py @@ -19,8 +19,8 @@ r""" links to drtsans imports circular_mask_from_beam_center, masked_detectors available at: - -beam_radius + 
+beam_radius """ # noqa: E501 from drtsans.mask_utils import circular_mask_from_beam_center, masked_detectors diff --git a/tests/integration/drtsans/mono/test_absolute_units.py b/tests/integration/drtsans/mono/test_absolute_units.py index fdbf19183..cdcbff260 100644 --- a/tests/integration/drtsans/mono/test_absolute_units.py +++ b/tests/integration/drtsans/mono/test_absolute_units.py @@ -4,9 +4,9 @@ from mantid.simpleapi import mtd r""" Links to drtsans imports -center_detector -namedtuplefy -empty_beam_scaling +center_detector +namedtuplefy +empty_beam_scaling """ from drtsans import center_detector from drtsans.settings import namedtuplefy @@ -82,7 +82,7 @@ def test_empty_beam_scaling(workspace_with_instrument, test_data_15b, temp_works ~drtsans.tof.eqsans.center_detector, ~drtsans.absolute_units.empty_beam_scaling, - + """ # save the intensities of the empty beam in a numpy.ndarray. diff --git a/tests/integration/drtsans/mono/test_dark_current.py b/tests/integration/drtsans/mono/test_dark_current.py index 11ef428dc..8234b2b68 100644 --- a/tests/integration/drtsans/mono/test_dark_current.py +++ b/tests/integration/drtsans/mono/test_dark_current.py @@ -5,8 +5,8 @@ # DeleteWorkspaces from mantid.simpleapi import mtd, CreateWorkspace -# SampleLogs within -# subtract_dark_current +# subtract_dark_current + ~drtsans.mono.dark_current.subtract_dark_current - + """ # Create dark current workspace, insert the duration of the dark current run as one of the log entries in the # dark current workspace. diff --git a/tests/integration/drtsans/mono/test_normalization.py b/tests/integration/drtsans/mono/test_normalization.py index 6eed7bb2a..40008abe0 100644 --- a/tests/integration/drtsans/mono/test_normalization.py +++ b/tests/integration/drtsans/mono/test_normalization.py @@ -4,8 +4,8 @@ # CreateWorkspace from mantid.simpleapi import CreateWorkspace -# SampleLogs within -# time, monitor within +# SampleLogs within +# time, monitor within from drtsans.samplelogs import SampleLogs from drtsans.mono.normalization import normalize_by_time, normalize_by_monitor @@ -68,9 +68,9 @@ def test_normalization_by_time(data_test_16a, temp_workspace_name): **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.mono.normalization.time - + """ # Create a Mantid workspace with the sample intensities intensities_list = list(itertools.chain(*data_test_16a["I_sam"])) @@ -111,9 +111,9 @@ def test_normalization_by_monitor(data_test_16a, temp_workspace_name): **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.mono.normalization.monitor - + """ # Create a Mantid workspace with the sample intensities intensities_list = list(itertools.chain(*data_test_16a["I_sam"])) diff --git a/tests/integration/drtsans/mono/test_transmission.py b/tests/integration/drtsans/mono/test_transmission.py index 2fb35ec2e..00a83ae12 100644 --- a/tests/integration/drtsans/mono/test_transmission.py +++ b/tests/integration/drtsans/mono/test_transmission.py @@ -28,7 +28,7 @@ def test_calculate_theta_dependent_transmission_single_value(workspace_with_inst drtsans functions employed: - apply_transmission_correction: - + """ # Generate a detector with five pixels, and embed the detector in a Mantid workspace diff --git a/tests/integration/drtsans/test_absolute_units.py b/tests/integration/drtsans/test_absolute_units.py index 6b69efb71..608f1331d 100644 --- a/tests/integration/drtsans/test_absolute_units.py +++ b/tests/integration/drtsans/test_absolute_units.py @@ -7,7 +7,7 @@ from mantid.simpleapi import 
CreateSingleValuedWorkspace r""" Links to drtsans imports -standard_sample_scaling +standard_sample_scaling """ from drtsans.absolute_units import standard_sample_scaling # noqa: E402 @@ -31,7 +31,7 @@ def test_standard_sample_measurement(temp_workspace_name): **drtsans functions used:** ~drtsans.absolute_units.standard_sample_scaling, - + """ # clean_workspace fixture will delete workspaces when the test finishes diff --git a/tests/integration/drtsans/test_momentum_transfer.py b/tests/integration/drtsans/test_momentum_transfer.py index f488675be..d70ae64ad 100644 --- a/tests/integration/drtsans/test_momentum_transfer.py +++ b/tests/integration/drtsans/test_momentum_transfer.py @@ -10,8 +10,8 @@ r""" Hyperlinks to drtsans functions -convert_to_q -namedtuplefy +convert_to_q +namedtuplefy """ # noqa: E501 from drtsans.momentum_transfer import convert_to_q from drtsans.settings import namedtuplefy diff --git a/tests/integration/drtsans/test_pixel_calibration.py b/tests/integration/drtsans/test_pixel_calibration.py index 82cf29816..4f6a14b02 100644 --- a/tests/integration/drtsans/test_pixel_calibration.py +++ b/tests/integration/drtsans/test_pixel_calibration.py @@ -283,7 +283,7 @@ def test_apparent_tube_width(data_apparent_tube_width, workspace_with_instrument **drtsans components used:** ~drtsans.tubecollection.TubeCollection - + """ data = data_apparent_tube_width # shortcut diff --git a/tests/integration/drtsans/test_stitch.py b/tests/integration/drtsans/test_stitch.py index 0a2a91836..adf326e2b 100644 --- a/tests/integration/drtsans/test_stitch.py +++ b/tests/integration/drtsans/test_stitch.py @@ -3,9 +3,9 @@ r""" Hyperlinks to drtsans functions -namedtuplefy -IQmod, testing -stitch_profiles +namedtuplefy +IQmod, testing +stitch_profiles """ from drtsans.settings import namedtuplefy from drtsans.dataobjects import IQmod, testing @@ -430,7 +430,7 @@ def test_stitch(data_test_16b, overlaps, throws_error): **drtsans functions used:** ~drtsans.stitch.stitch_profiles - + devs - Jose Borreguero SME - Wei-Ren Chen , LiLin He diff --git a/tests/integration/drtsans/test_subtract_background.py b/tests/integration/drtsans/test_subtract_background.py index 49d6dc8ce..9f4377a49 100644 --- a/tests/integration/drtsans/test_subtract_background.py +++ b/tests/integration/drtsans/test_subtract_background.py @@ -1,4 +1,4 @@ -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/api.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/api.py from drtsans import subtract_background from drtsans.dataobjects import DataType, IQazimuthal, IQmod from tests.conftest import assert_wksp_equal diff --git a/tests/integration/drtsans/test_tubecollection.py b/tests/integration/drtsans/test_tubecollection.py index e81709947..2c5a5347f 100644 --- a/tests/integration/drtsans/test_tubecollection.py +++ b/tests/integration/drtsans/test_tubecollection.py @@ -11,7 +11,7 @@ r""" Hyperlinks to drtsans functions -ElementComponentInfo, PixelInfo, TubeInfo, TubeCollection +ElementComponentInfo, PixelInfo, TubeInfo, TubeCollection """ # noqa: E501 from drtsans.instruments import fetch_idf from drtsans.settings import namedtuplefy diff --git a/tests/integration/drtsans/tof/eqsans/test_dark_current.py b/tests/integration/drtsans/tof/eqsans/test_dark_current.py index 728d35b88..935aad5b7 100644 --- a/tests/integration/drtsans/tof/eqsans/test_dark_current.py +++ b/tests/integration/drtsans/tof/eqsans/test_dark_current.py @@ -4,8 +4,8 @@ # CreateWorkspace from mantid.simpleapi import mtd, CreateWorkspace -# 
subtract_dark_current # noqa: E501 -# SampleLogs within +# subtract_dark_current # noqa: E501 +# SampleLogs within from drtsans.samplelogs import SampleLogs from drtsans.tof.eqsans.dark_current import subtract_dark_current @@ -112,9 +112,9 @@ def test_subtract_dark_current(data_test_16a, temp_workspace_name): **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.tof.eqsans.dark_current.normalize_dark_current - + """ wavelength_bin_boundaries = np.arange( data_test_16a["l_min"], diff --git a/tests/integration/drtsans/tof/eqsans/test_normalization.py b/tests/integration/drtsans/tof/eqsans/test_normalization.py index 879cede3e..65108b26c 100644 --- a/tests/integration/drtsans/tof/eqsans/test_normalization.py +++ b/tests/integration/drtsans/tof/eqsans/test_normalization.py @@ -5,8 +5,8 @@ from mantid.simpleapi import CreateWorkspace from mantid.api import mtd -# SampleLogs within -# time, monitor within +# SampleLogs within +# time, monitor within from drtsans.samplelogs import SampleLogs from drtsans.tof.eqsans import ( load_events, @@ -105,9 +105,9 @@ def test_normalization_by_time(data_test_16a_by_time, temp_workspace_name): **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.tof.normalization.normalize_by_time - + """ # Create a sample workspace with the input data data_workspace = temp_workspace_name() @@ -265,9 +265,9 @@ def test_normalization_by_monitor(data_test_16a_by_monitor, temp_workspace_name) **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.tof.normalization.normalize_by_monitor - + """ # Input intensities from the test, only one value per detector pixel intensities_list = np.array(data_test_16a_by_monitor["I_sam"]).flatten() @@ -446,9 +446,9 @@ def test_normalize_by_proton_charge_and_flux(data_test_16a_by_proton_charge_and_ **drtsans functions used:** ~drtsans.samplelogs.SampleLogs - + ~drtsans.tof.normalization.normalize_by_proton_charge_and_flux - + """ test_data = data_test_16a_by_proton_charge_and_flux # handy shortcut # Input intensities from the test, only one value per detector pixel diff --git a/tests/integration/drtsans/tof/eqsans/test_transmission.py b/tests/integration/drtsans/tof/eqsans/test_transmission.py index a31857f5e..bec623164 100644 --- a/tests/integration/drtsans/tof/eqsans/test_transmission.py +++ b/tests/integration/drtsans/tof/eqsans/test_transmission.py @@ -18,14 +18,14 @@ r""" Hyperlinks to drtsans functions namedtuplefy available at: - -SampleLogs -insert_aperture_logs -prepare_data + +SampleLogs +insert_aperture_logs +prepare_data calculate_transmission, fit_raw_transmission available at: - -apply_transmission_correction -find_beam_center, center_detector + +apply_transmission_correction +find_beam_center, center_detector """ # noqa: E501 from drtsans.settings import namedtuplefy from drtsans.samplelogs import SampleLogs diff --git a/tests/unit/drtsans/mono/gpsans/test_attenuation.py b/tests/unit/drtsans/mono/gpsans/test_attenuation.py index 1ffc87bd7..d5f686eb0 100644 --- a/tests/unit/drtsans/mono/gpsans/test_attenuation.py +++ b/tests/unit/drtsans/mono/gpsans/test_attenuation.py @@ -1,10 +1,10 @@ #!/usr/bin/env python import pytest -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/mono/gpsans/attenuation.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/mono/gpsans/attenuation.py from drtsans.mono.gpsans import attenuation_factor -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/samplelogs.py +# 
https://github.com/neutrons/drtsans/blob/next/src/drtsans/samplelogs.py from drtsans.samplelogs import SampleLogs diff --git a/tests/unit/drtsans/mono/test_dark_current.py b/tests/unit/drtsans/mono/test_dark_current.py index ae1548128..de0d7f915 100644 --- a/tests/unit/drtsans/mono/test_dark_current.py +++ b/tests/unit/drtsans/mono/test_dark_current.py @@ -8,9 +8,9 @@ r""" Hyperlinks to drtsans functions -SampleLogs -time -subtract_dark_current +SampleLogs +time +subtract_dark_current """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans.dark_current import duration diff --git a/tests/unit/drtsans/mono/test_momentum_transfer.py b/tests/unit/drtsans/mono/test_momentum_transfer.py index 6e5bdf78c..691175592 100644 --- a/tests/unit/drtsans/mono/test_momentum_transfer.py +++ b/tests/unit/drtsans/mono/test_momentum_transfer.py @@ -5,14 +5,14 @@ from collections import namedtuple from mantid.simpleapi import AddSampleLog -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/mono/convert_to_q.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/mono/convert_to_q.py from drtsans.mono.momentum_transfer import ( convert_to_q, mono_resolution, retrieve_instrument_setup, ) -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/resolution.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/resolution.py import drtsans.resolution diff --git a/tests/unit/drtsans/mono/test_normalization.py b/tests/unit/drtsans/mono/test_normalization.py index b40972111..623e6854b 100644 --- a/tests/unit/drtsans/mono/test_normalization.py +++ b/tests/unit/drtsans/mono/test_normalization.py @@ -9,8 +9,8 @@ r""" Hyperlinks to drtsans functions -SampleLogs -normalize_by_monitor, normalize_by_time +SampleLogs +normalize_by_monitor, normalize_by_time """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans.mono.gpsans import normalize_by_monitor, normalize_by_time, ZeroMonitorCountsError, NoMonitorMetadataError diff --git a/tests/unit/drtsans/test_dark_current.py b/tests/unit/drtsans/test_dark_current.py index 7df4565dc..7e02e3673 100644 --- a/tests/unit/drtsans/test_dark_current.py +++ b/tests/unit/drtsans/test_dark_current.py @@ -11,7 +11,7 @@ r""" Hyperlinks to drtsans functions -duration, counts_in_detector +duration, counts_in_detector """ # noqa: E501 from drtsans.dark_current import duration, counts_in_detector diff --git a/tests/unit/drtsans/test_i_of_q_1d_binning.py b/tests/unit/drtsans/test_i_of_q_1d_binning.py index 79d48e67a..896ed9a25 100644 --- a/tests/unit/drtsans/test_i_of_q_1d_binning.py +++ b/tests/unit/drtsans/test_i_of_q_1d_binning.py @@ -1,7 +1,7 @@ import numpy as np from drtsans.dataobjects import IQmod -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/iq.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/iq.py from drtsans.determine_bins import Bins from drtsans.iq import ( determine_1d_linear_bins, diff --git a/tests/unit/drtsans/test_i_of_q_2d_binning.py b/tests/unit/drtsans/test_i_of_q_2d_binning.py index 140eeda66..474607f63 100644 --- a/tests/unit/drtsans/test_i_of_q_2d_binning.py +++ b/tests/unit/drtsans/test_i_of_q_2d_binning.py @@ -1,6 +1,6 @@ from drtsans.dataobjects import IQazimuthal -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/iq.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/iq.py from drtsans.iq import determine_1d_linear_bins, BinningMethod, bin_intensity_into_q2d # 
https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/tests/unit/drtsans/i_of_q_binning_tests_data.py diff --git a/tests/unit/drtsans/test_i_of_q_annular_binning.py b/tests/unit/drtsans/test_i_of_q_annular_binning.py index 107f49d87..987e39178 100644 --- a/tests/unit/drtsans/test_i_of_q_annular_binning.py +++ b/tests/unit/drtsans/test_i_of_q_annular_binning.py @@ -1,6 +1,6 @@ from drtsans.dataobjects import IQazimuthal -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/iq.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/iq.py from drtsans.iq import BinningMethod, BinningParams, bin_annular_into_q1d # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/tests/unit/drtsans/i_of_q_binning_tests_data.py diff --git a/tests/unit/drtsans/test_i_of_q_wedge_binning.py b/tests/unit/drtsans/test_i_of_q_wedge_binning.py index 69efdb762..53f7d321e 100644 --- a/tests/unit/drtsans/test_i_of_q_wedge_binning.py +++ b/tests/unit/drtsans/test_i_of_q_wedge_binning.py @@ -1,6 +1,6 @@ from drtsans.dataobjects import IQazimuthal, q_azimuthal_to_q_modulo -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/iq.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/iq.py from drtsans.iq import ( determine_1d_log_bins, BinningMethod, diff --git a/tests/unit/drtsans/test_i_q_azimuthal_to_i_q_modulo.py b/tests/unit/drtsans/test_i_q_azimuthal_to_i_q_modulo.py index 63a6ddf14..315db8c65 100644 --- a/tests/unit/drtsans/test_i_q_azimuthal_to_i_q_modulo.py +++ b/tests/unit/drtsans/test_i_q_azimuthal_to_i_q_modulo.py @@ -1,6 +1,6 @@ from drtsans.dataobjects import IQazimuthal, q_azimuthal_to_q_modulo -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/iq.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/iq.py from drtsans.iq import select_i_of_q_by_wedge # https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/tests/unit/drtsans/test_q_azimuthal_to_q_modulo.py diff --git a/tests/unit/drtsans/test_momentum_transfer.py b/tests/unit/drtsans/test_momentum_transfer.py index 5a0a9cd40..d11df808c 100644 --- a/tests/unit/drtsans/test_momentum_transfer.py +++ b/tests/unit/drtsans/test_momentum_transfer.py @@ -9,9 +9,9 @@ r""" Hyperlinks to drtsans functions -pixel_centers -convert_to_q, _filter_and_replicate, subpixel_info -namedtuplefy +pixel_centers +convert_to_q, _filter_and_replicate, subpixel_info +namedtuplefy """ # noqa: E501 from drtsans.geometry import pixel_centers from drtsans.momentum_transfer import convert_to_q, _filter_and_replicate, subpixel_info diff --git a/tests/unit/drtsans/test_sensitivity.py b/tests/unit/drtsans/test_sensitivity.py index cefb60194..02ebd209c 100644 --- a/tests/unit/drtsans/test_sensitivity.py +++ b/tests/unit/drtsans/test_sensitivity.py @@ -1,7 +1,7 @@ import pytest import numpy as np -# testing https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/sensitivity.py +# testing https://github.com/neutrons/drtsans/blob/next/src/drtsans/sensitivity.py from drtsans.sensitivity import apply_sensitivity_correction import os from tests.conftest import sns_data_dir diff --git a/tests/unit/drtsans/test_sensitivity_correction_moving_detectors.py b/tests/unit/drtsans/test_sensitivity_correction_moving_detectors.py index 8406f03c8..e7f6767a1 100644 --- a/tests/unit/drtsans/test_sensitivity_correction_moving_detectors.py +++ b/tests/unit/drtsans/test_sensitivity_correction_moving_detectors.py @@ -1,6 +1,6 @@ import numpy as np -# 
https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/mono/gpsans/prepare_sensitivity.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/mono/gpsans/prepare_sensitivity.py from drtsans.sensitivity_correction_moving_detectors import ( prepare_sensitivity, _mask_zero_count_pixel, diff --git a/tests/unit/drtsans/test_thickness_normalization.py b/tests/unit/drtsans/test_thickness_normalization.py index fe2db0159..d05dc2402 100644 --- a/tests/unit/drtsans/test_thickness_normalization.py +++ b/tests/unit/drtsans/test_thickness_normalization.py @@ -11,7 +11,7 @@ r""" Hyperlinks to drtsans functions -normalize_by_thickness """ # noqa: E501 from drtsans.thickness_normalization import normalize_by_thickness diff --git a/tests/unit/drtsans/test_transmission_correction.py b/tests/unit/drtsans/test_transmission_correction.py index 02f8a280a..4f27741d5 100644 --- a/tests/unit/drtsans/test_transmission_correction.py +++ b/tests/unit/drtsans/test_transmission_correction.py @@ -1,4 +1,4 @@ -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/transmission.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/transmission.py from drtsans.transmission import apply_transmission_correction import numpy as np import pytest diff --git a/tests/unit/drtsans/test_tubecollection.py b/tests/unit/drtsans/test_tubecollection.py index 7d5c8d907..4a57655fe 100644 --- a/tests/unit/drtsans/test_tubecollection.py +++ b/tests/unit/drtsans/test_tubecollection.py @@ -8,7 +8,7 @@ r""" Hyperlinks to the tubecollection - + """ # noqa: E501 from drtsans import tubecollection diff --git a/tests/unit/drtsans/tof/eqsans/test_convert_tof_to_wavelength.py b/tests/unit/drtsans/tof/eqsans/test_convert_tof_to_wavelength.py index 1761ced84..86678c43f 100644 --- a/tests/unit/drtsans/tof/eqsans/test_convert_tof_to_wavelength.py +++ b/tests/unit/drtsans/tof/eqsans/test_convert_tof_to_wavelength.py @@ -1,10 +1,10 @@ import numpy as np import pytest -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/samplelogs.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/samplelogs.py from drtsans.samplelogs import SampleLogs -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/tof/eqsans/correct_frame.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/tof/eqsans/correct_frame.py from drtsans.tof.eqsans.correct_frame import convert_to_wavelength diff --git a/tests/unit/drtsans/tof/eqsans/test_dark_current.py b/tests/unit/drtsans/tof/eqsans/test_dark_current.py index 441dbdd93..90269fbfb 100644 --- a/tests/unit/drtsans/tof/eqsans/test_dark_current.py +++ b/tests/unit/drtsans/tof/eqsans/test_dark_current.py @@ -22,8 +22,8 @@ r""" Hyperlinks to drtsans functions -SampleLogs -dark_current +SampleLogs +dark_current """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans.tof.eqsans import dark_current diff --git a/tests/unit/drtsans/tof/eqsans/test_normalization.py b/tests/unit/drtsans/tof/eqsans/test_normalization.py index a5c68bb81..bf343f2c2 100644 --- a/tests/unit/drtsans/tof/eqsans/test_normalization.py +++ b/tests/unit/drtsans/tof/eqsans/test_normalization.py @@ -12,10 +12,10 @@ r""" Hyperlinks to drtsans functions -SampleLogs -load_events -prepare_monitors -normalize_by_time,...load_flux_to_monitor_ratio_file +SampleLogs +load_events +prepare_monitors +normalize_by_time,...load_flux_to_monitor_ratio_file """ # noqa: E501 from drtsans.samplelogs import SampleLogs from drtsans.tof.eqsans import ( diff --git 
a/tests/unit/drtsans/tof/eqsans/test_resolution.py b/tests/unit/drtsans/tof/eqsans/test_resolution.py index 03a5cc252..a04bd275e 100644 --- a/tests/unit/drtsans/tof/eqsans/test_resolution.py +++ b/tests/unit/drtsans/tof/eqsans/test_resolution.py @@ -7,7 +7,7 @@ from scipy import constants import pytest -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/tof/eqsans/convert_to_q.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/tof/eqsans/convert_to_q.py from drtsans.tof.eqsans.momentum_transfer import ( eqsans_resolution, moderator_time_uncertainty, diff --git a/tests/unit/drtsans/tof/eqsans/test_transmission.py b/tests/unit/drtsans/tof/eqsans/test_transmission.py index 5dab8d2a4..5e694ecc9 100644 --- a/tests/unit/drtsans/tof/eqsans/test_transmission.py +++ b/tests/unit/drtsans/tof/eqsans/test_transmission.py @@ -5,9 +5,9 @@ # https://docs.mantidproject.org/nightly/algorithms/LoadNexus-v1.html from mantid.simpleapi import LoadNexus, mtd -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/settings.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/tof/eqsans/correct_frame.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/drtsans/tof/eqsans/transmission.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/settings.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/tof/eqsans/correct_frame.py +# https://github.com/neutrons/drtsans/blob/next/src/drtsans/tof/eqsans/transmission.py from drtsans.settings import namedtuplefy # noqa: E402 from drtsans.samplelogs import SampleLogs # noqa: E402 from drtsans.tof.eqsans.correct_frame import transmitted_bands # noqa: E402 From 5cdc3720437a94b1a7745b5a291871e8a8a2d052 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Tue, 11 Feb 2025 11:06:16 -0500 Subject: [PATCH 22/23] one more update --- src/drtsans/iq.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/drtsans/iq.py b/src/drtsans/iq.py index fb8a4f056..031ee6258 100644 --- a/src/drtsans/iq.py +++ b/src/drtsans/iq.py @@ -1,5 +1,4 @@ # https://github.com/neutrons/drtsans/blob/next/src/drtsans/dataobjects.py -# https://code.ornl.gov/sns-hfir-scse/sans/sans-backend/blob/next/docs/drtsans/dataobjects.rst import numpy from drtsans.dataobjects import ( From 5197bb1f37dad4d9d05f1aaa250642f602c27cf7 Mon Sep 17 00:00:00 2001 From: glass-ships Date: Tue, 11 Feb 2025 11:08:38 -0500 Subject: [PATCH 23/23] add type hinting to iq.py --- src/drtsans/iq.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/drtsans/iq.py b/src/drtsans/iq.py index 031ee6258..925fdaab2 100644 --- a/src/drtsans/iq.py +++ b/src/drtsans/iq.py @@ -1,23 +1,24 @@ # https://github.com/neutrons/drtsans/blob/next/src/drtsans/dataobjects.py +from enum import Enum +from typing import Any, List, Tuple, Union + import numpy +import numpy as np from drtsans.dataobjects import ( DataType, - getDataType, IQazimuthal, IQmod, - q_azimuthal_to_q_modulo, concatenate, + getDataType, + q_azimuthal_to_q_modulo, ) -from enum import Enum -from typing import List, Any, Tuple -import numpy as np # https://github.com/neutrons/drtsans/blob/next/src/drtsans/determine_bins.py from drtsans.determine_bins import ( - determine_1d_log_bins, - determine_1d_linear_bins, BinningParams, + determine_1d_linear_bins, + determine_1d_log_bins, ) # To ignore warning: invalid value encountered in true_divide @@ -46,7 +47,7 @@ class BinningMethod(Enum): WEIGHTED = 2 # weighted binning -def 
check_iq_for_binning(i_of_q): +def check_iq_for_binning(i_of_q: Union[IQmod, IQazimuthal]): """Check I(Q) for binning. Binning I(Q) assumes that