diff --git a/.github/workflows/lint-and-build.yml b/.github/workflows/lint-and-build.yml index a7df7e76..6623ff54 100644 --- a/.github/workflows/lint-and-build.yml +++ b/.github/workflows/lint-and-build.yml @@ -12,6 +12,7 @@ on: branches: - main - master + - dev* paths: - "**.py" - "**.ui" diff --git a/.github/workflows/printenv.yml b/.github/workflows/printenv.yml index b9b81cbf..ed1adcbf 100644 --- a/.github/workflows/printenv.yml +++ b/.github/workflows/printenv.yml @@ -8,10 +8,10 @@ on: required: true default: false type: boolean - # push: - # branches: - # - master - # - dev + push: + branches: + - master + - dev env: GITHUB_HEAD_REPOSITORY: ${{ github.event.pull_request.head.repo.full_name }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7b8f88e8..6aed4fc9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -17,12 +17,11 @@ repos: hooks: - id: pretty-format-ini args: [--autofix] - # TODO: Re-enable in dev, master doesn't have Ruff configs - # - repo: https://github.com/charliermarsh/ruff-pre-commit - # rev: "v0.0.262" # Must match requirements-dev.txt - # hooks: - # - id: ruff - # args: [--fix] + - repo: https://github.com/charliermarsh/ruff-pre-commit + rev: "v0.0.269" # Must match requirements-dev.txt + hooks: + - id: ruff + args: [--fix] - repo: https://github.com/pre-commit/mirrors-autopep8 rev: "v2.0.2" # Must match requirements-dev.txt hooks: diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 6e71f39c..4ecf5a13 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -5,6 +5,7 @@ "davidanson.vscode-markdownlint", "eamodio.gitlens", "emeraldwalk.runonsave", + "github.vscode-github-actions", "ms-python.autopep8", "ms-python.python", "ms-python.vscode-pylance", diff --git a/README.md b/README.md index adc0d6c7..65a6851e 100644 --- a/README.md +++ b/README.md @@ -64,6 +64,7 @@ This program can be used to automatically start, split, and reset your preferred - Perceptual Hash: An explanation on pHash comparison can be found [here](http://www.hackerfactor.com/blog/index.php?/archives/432-Looks-Like-It.html). It is highly recommended to NOT use pHash if you use masked images, or it'll be very inaccurate. #### Capture Method + - **Windows Graphics Capture** (fast, most compatible, capped at 60fps) Only available in Windows 10.0.17134 and up. @@ -83,13 +84,13 @@ This program can be used to automatically start, split, and reset your preferred - **Force Full Content Rendering** (very slow, can affect rendering) Uses BitBlt behind the scene, but passes a special flag to PrintWindow to force rendering the entire desktop. About 10-15x slower than BitBlt based on original window size and can mess up some applications' rendering pipelines. -- **Video Capture Device** +- **Video Capture Device** Uses a Video Capture Device, like a webcam, virtual cam, or capture card. - If you want to use this with OBS' Virtual Camera, use the [Virtualcam plugin](https://github.com/Avasam/obs-virtual-cam/releases) instead. #### Capture Device -Select the Video Capture Device that you wanna use if selecting the `Video Capture Device` Capture Method. +Select the Video Capture Device that you wanna use if selecting the `Video Capture Device` Capture Method. + #### Show Live Similarity @@ -216,7 +217,6 @@ The AutoSplit LiveSplit Component will directly connect AutoSplit with LiveSplit - For many games, it will be difficult to find a split image for the last split of the run. - The window of the capture region cannot be minimized. 
-- OBS' integrated Virtual Camera does not work and makes AutoSplit crash. ## Resources diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md index fbe45ef0..1361ffac 100644 --- a/docs/CONTRIBUTING.md +++ b/docs/CONTRIBUTING.md @@ -28,6 +28,12 @@ Your Pull Request has to pass all checks ot be accepted. If it is still a work-i Most coding standards will be enforced by automated tooling. As time goes on, project-specific standards and "gotchas" in the frameworks we use will be listed here. +### Magic numbers + +Please avoid using magic numbers and prefer constants and enums that have a meaningful name when possible. +If a constant is shared throughout the app, it should live in `src/utils.py`. Unless it is very-specific to a module. +For image shape and channels, please use `utils.ImageShape` and `utils.ColorChannel`. + ## Testing None 😦 Please help us create test suites, we lack the time, but we really want (need!) them. diff --git a/pyproject.toml b/pyproject.toml index b84e0c21..f688afa6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,10 +46,13 @@ ignore = [ ### FIXME (no warnings in Ruff yet: https://github.com/charliermarsh/ruff/issues/1256): "PTH", + # Ignore until linux support + "EXE", ] [tool.ruff.per-file-ignores] "typings/**/*.pyi" = [ + "F811", # Re-exports false positives "F821", # https://github.com/charliermarsh/ruff/issues/3011 # The following can't be controlled for external libraries: "N8", # Naming conventions @@ -73,7 +76,7 @@ required-imports = ["from __future__ import annotations"] known-local-folder = [ "capture_method", "gen", - "AutoControlledWorker", + "AutoControlledThread", "AutoSplit", "AutoSplitImage", "compare", @@ -104,7 +107,6 @@ max-branches = 15 # https://github.com/hhatto/autopep8#more-advanced-usage [tool.autopep8] max_line_length = 120 -recursive = true aggressive = 3 ignore = [ "E124", # Closing bracket may not match multi-line method invocation style (enforced by add-trailing-comma) @@ -138,7 +140,7 @@ reportUnusedCallResult = "none" reportMissingTypeStubs = "warning" # False positives with TYPE_CHECKING reportImportCycles = "information" -# False positives with PyQt .connect +# False positives with PySide .connect reportFunctionMemberAccess = "none" # Extra runtime safety reportUnnecessaryComparison = "warning" diff --git a/res/about.ui b/res/about.ui index 25d8fae5..b9e1ee1a 100644 --- a/res/about.ui +++ b/res/about.ui @@ -97,7 +97,7 @@ Thank you! 
60 150 147 - 47 + 51 diff --git a/res/design.ui b/res/design.ui index 7309b7d4..f4c22050 100644 --- a/res/design.ui +++ b/res/design.ui @@ -972,6 +972,9 @@ F1 + + Qt::ApplicationShortcut + @@ -988,6 +991,9 @@ Ctrl+S + + Qt::ApplicationShortcut + @@ -996,6 +1002,9 @@ Ctrl+O + + Qt::ApplicationShortcut + @@ -1004,6 +1013,9 @@ Ctrl+Shift+S + + Qt::ApplicationShortcut + @@ -1017,6 +1029,9 @@ Ctrl+, + + Qt::ApplicationShortcut + QAction::PreferencesRole diff --git a/res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl b/res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl new file mode 100644 index 00000000..585a6efe Binary files /dev/null and b/res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl differ diff --git a/scripts/build.ps1 b/scripts/build.ps1 index 840db0c2..f9825aaf 100644 --- a/scripts/build.ps1 +++ b/scripts/build.ps1 @@ -1,10 +1,11 @@ & "$PSScriptRoot/compile_resources.ps1" $arguments = @( + "$PSScriptRoot/../src/AutoSplit.py", '--onefile', '--windowed', '--additional-hooks-dir=Pyinstaller/hooks', '--icon=res/icon.ico', '--splash=res/splash.png') -pyinstaller $arguments "$PSScriptRoot/../src/AutoSplit.py" +Start-Process -Wait -NoNewWindow pyinstaller -ArgumentList $arguments diff --git a/scripts/compile_resources.ps1 b/scripts/compile_resources.ps1 index 623a0acb..91a180a3 100644 --- a/scripts/compile_resources.ps1 +++ b/scripts/compile_resources.ps1 @@ -2,11 +2,17 @@ $originalDirectory = $pwd Set-Location "$PSScriptRoot/.." New-Item -Force -ItemType directory ./src/gen | Out-Null -pyuic6 './res/about.ui' -o './src/gen/about.py' -pyuic6 './res/design.ui' -o './src/gen/design.py' -pyuic6 './res/settings.ui' -o './src/gen/settings.py' -pyuic6 './res/update_checker.ui' -o './src/gen/update_checker.py' +pyside6-uic './res/about.ui' -o './src/gen/about.py' +pyside6-uic './res/design.ui' -o './src/gen/design.py' +pyside6-uic './res/settings.ui' -o './src/gen/settings.py' +pyside6-uic './res/update_checker.ui' -o './src/gen/update_checker.py' pyside6-rcc './res/resources.qrc' -o './src/gen/resources_rc.py' +$files = Get-ChildItem ./src/gen/ *.py +foreach ($file in $files) { + (Get-Content $file.PSPath) | + ForEach-Object { $_ -replace 'import resources_rc', 'from . 
import resources_rc' } | + Set-Content $file.PSPath +} Write-Host 'Generated code from .ui files' $build_vars_path = "$PSScriptRoot/../src/gen/build_vars.py" diff --git a/scripts/designer.ps1 b/scripts/designer.ps1 index 13d2450a..a6a159f6 100644 --- a/scripts/designer.ps1 +++ b/scripts/designer.ps1 @@ -1,4 +1,10 @@ -$qt6_applications_path = python3 -c 'import qt6_applications; print(qt6_applications.__path__[0])' +$qt6_applications_import = 'import qt6_applications; print(qt6_applications.__path__[0])' +$qt6_applications_path = python -c $qt6_applications_import +if ($null -eq $qt6_applications_path) { + Write-Host 'Designer not found, installing qt6_applications' + python -m pip install qt6_applications +} +$qt6_applications_path = python -c $qt6_applications_import & "$qt6_applications_path/Qt/bin/designer" ` "$PSScriptRoot/../res/design.ui" ` "$PSScriptRoot/../res/about.ui" ` diff --git a/scripts/install.ps1 b/scripts/install.ps1 index cac7c468..1a513d35 100644 --- a/scripts/install.ps1 +++ b/scripts/install.ps1 @@ -1,14 +1,7 @@ -# Alias python3 to python on Windows -If ($IsWindows) { - $python = (Get-Command python).Source - $python3 = "$((Get-Item $python).Directory.FullName)/python3.exe" - New-Item -ItemType SymbolicLink -Path $python3 -Target $python -ErrorAction SilentlyContinue -} - # Installing Python dependencies $dev = If ($Env:GITHUB_JOB -eq 'Build') { '' } Else { '-dev' } # Ensures installation tools are up to date. This also aliases pip to pip3 on MacOS. -python3 -m pip install wheel pip setuptools --upgrade +python -m pip install wheel pip setuptools --upgrade pip install -r "$PSScriptRoot/requirements$dev.txt" --upgrade # Don't compile resources on the Build CI job as it'll do so in build script diff --git a/scripts/requirements-dev.txt b/scripts/requirements-dev.txt index aeb7a1e6..9898efaa 100644 --- a/scripts/requirements-dev.txt +++ b/scripts/requirements-dev.txt @@ -11,16 +11,16 @@ add-trailing-comma>=2.3.0 # Added support for with statement autopep8>=2.0.0 # New checks ruff>=0.0.269 # New TODO and PYI violations # -# Run `./scripts/designer.ps1` to quickly open the bundled PyQt Designer. +# Run `./scripts/designer.ps1` to quickly open the bundled Qt Designer. 
# Can also be downloaded externally as a non-python package # qt6-applications # Types -types-d3dshot +types-D3DShot ; sys_platform == 'win32' types-keyboard types-Pillow types-psutil types-PyAutoGUI types-pyinstaller -types-pywin32 +types-pywin32 ; sys_platform == 'win32' types-requests types-toml diff --git a/scripts/requirements.txt b/scripts/requirements.txt index 8f841ebf..1ad1ac5e 100644 --- a/scripts/requirements.txt +++ b/scripts/requirements.txt @@ -7,12 +7,14 @@ certifi ImageHash>=4.3.1 # Contains type information + setup as package not module git+https://github.com/boppreh/keyboard.git#egg=keyboard # Fix install on macos and linux-ci https://github.com/boppreh/keyboard/pull/568 numpy>=1.23.2 # Python 3.11 wheels -opencv-python-headless>=4.6 # Breaking changes importing cv2.cv2 +# opencv-python-headless>=4.6 # Breaking changes importing cv2.cv2 +./res/opencv_python_headless-4.7.0+be945d8-cp37-abi3-win_amd64.whl # New typing + OBS Camera fixes packaging Pillow>=9.2 # gnome-screeshot checks psutil PyAutoGUI -PyQt6>=6.4.2 # Python 3.11 support +PyWinCtl>=0.0.42 # py.typed +PySide6-Essentials>=6.5.1 # fixes incomplete tuple return types https://bugreports.qt.io/browse/PYSIDE-2285 requests<=2.28.1 # 2.28.2 has issues with PyInstaller https://github.com/pyinstaller/pyinstaller-hooks-contrib/issues/534 toml typing-extensions>=4.4.0 # @override decorator support @@ -20,7 +22,6 @@ typing-extensions>=4.4.0 # @override decorator support # Build and compile resources pyinstaller>=5.5 # Python 3.11 support pyinstaller-hooks-contrib>=2022.9 # opencv-python 4.6 support. Changes for pywintypes and comtypes -PySide6-Essentials>=6.4.0.1 # Python 3.11 support # # https://peps.python.org/pep-0508/#environment-markers # diff --git a/scripts/start.ps1 b/scripts/start.ps1 index d1e8ec08..70d6fd8b 100644 --- a/scripts/start.ps1 +++ b/scripts/start.ps1 @@ -1,3 +1,3 @@ param ([string]$p1) & "$PSScriptRoot/compile_resources.ps1" -python3 "$PSScriptRoot/../src/AutoSplit.py" $p1 +python "$PSScriptRoot/../src/AutoSplit.py" $p1 diff --git a/src/AutoControlledWorker.py b/src/AutoControlledThread.py similarity index 94% rename from src/AutoControlledWorker.py rename to src/AutoControlledThread.py index 5b3238c1..f5e518a8 100644 --- a/src/AutoControlledWorker.py +++ b/src/AutoControlledThread.py @@ -2,7 +2,7 @@ from typing import TYPE_CHECKING -from PyQt6 import QtCore +from PySide6 import QtCore import error_messages import user_profile @@ -11,11 +11,12 @@ from AutoSplit import AutoSplit -class AutoControlledWorker(QtCore.QObject): +class AutoControlledThread(QtCore.QThread): def __init__(self, autosplit: AutoSplit): self.autosplit = autosplit super().__init__() + @QtCore.Slot() def run(self): while True: try: diff --git a/src/AutoSplit.py b/src/AutoSplit.py index bfa1d949..aa6706df 100644 --- a/src/AutoSplit.py +++ b/src/AutoSplit.py @@ -5,6 +5,7 @@ import os import signal import sys +from collections.abc import Callable from time import time from types import FunctionType from typing import NoReturn @@ -12,14 +13,14 @@ import certifi import cv2 from psutil import process_iter -from PyQt6 import QtCore, QtGui -from PyQt6.QtTest import QTest -from PyQt6.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow, QMessageBox, QWidget +from PySide6 import QtCore, QtGui +from PySide6.QtTest import QTest +from PySide6.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow, QMessageBox from typing_extensions import override import error_messages import user_profile -from AutoControlledWorker import 
AutoControlledWorker +from AutoControlledThread import AutoControlledThread from AutoSplitImage import START_KEYWORD, AutoSplitImage, ImageType from capture_method import CaptureMethodBase, CaptureMethodEnum from gen import about, design, settings, update_checker @@ -37,23 +38,17 @@ from region_selection import align_region, select_region, select_window, validate_before_parsing from split_parser import BELOW_FLAG, DUMMY_FLAG, PAUSE_FLAG, parse_and_validate_images from user_profile import DEFAULT_PROFILE -from utils import ( - AUTOSPLIT_VERSION, - FIRST_WIN_11_BUILD, - FROZEN, - WINDOWS_BUILD_NUMBER, - auto_split_directory, - decimal, - is_valid_image, - open_file, -) +from utils import AUTOSPLIT_VERSION, FROZEN, auto_split_directory, decimal, is_valid_image, open_file CHECK_FPS_ITERATIONS = 10 +DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2 = 2 # Needed when compiled, along with the custom hook-requests PyInstaller hook os.environ["REQUESTS_CA_BUNDLE"] = certifi.where() myappid = f"Toufool.AutoSplit.v{AUTOSPLIT_VERSION}" ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) +# qt.qpa.window: SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2) failed: COM error 0x5: Access is denied. # noqa: E501 +# ctypes.windll.user32.SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2) class AutoSplit(QMainWindow, design.Ui_MainWindow): @@ -61,16 +56,16 @@ class AutoSplit(QMainWindow, design.Ui_MainWindow): is_auto_controlled = "--auto-controlled" in sys.argv # Signals - start_auto_splitter_signal = QtCore.pyqtSignal() - reset_signal = QtCore.pyqtSignal() - skip_split_signal = QtCore.pyqtSignal() - undo_split_signal = QtCore.pyqtSignal() - pause_signal = QtCore.pyqtSignal() - after_setting_hotkey_signal = QtCore.pyqtSignal() - update_checker_widget_signal = QtCore.pyqtSignal(str, bool) - load_start_image_signal = QtCore.pyqtSignal([], [bool], [bool, bool]) + start_auto_splitter_signal = QtCore.Signal() + reset_signal = QtCore.Signal() + skip_split_signal = QtCore.Signal() + undo_split_signal = QtCore.Signal() + pause_signal = QtCore.Signal() + after_setting_hotkey_signal = QtCore.Signal() + update_checker_widget_signal = QtCore.Signal(str, bool) + load_start_image_signal = QtCore.Signal(bool, bool) # Use this signal when trying to show an error from outside the main thread - show_error_signal = QtCore.pyqtSignal(FunctionType) + show_error_signal = QtCore.Signal(FunctionType) # Timers timer_live_image = QtCore.QTimer() @@ -110,25 +105,19 @@ class AutoSplit(QMainWindow, design.Ui_MainWindow): reset_image: AutoSplitImage | None = None split_images: list[AutoSplitImage] = [] split_image: AutoSplitImage | None = None - update_auto_control: QtCore.QThread | None = None + update_auto_control: AutoControlledThread | None = None - def __init__(self, parent: QWidget | None = None): # noqa: PLR0915 - super().__init__(parent) + def __init__(self): # noqa: PLR0915 + super().__init__() # Setup global error handling - self.show_error_signal.connect(lambda error_message_box: error_message_box()) + def _show_error_signal_slot(error_message_box: Callable[..., object]): + return error_message_box() + self.show_error_signal.connect(_show_error_signal_slot) sys.excepthook = error_messages.make_excepthook(self) self.setupUi(self) self.setWindowTitle(f"AutoSplit v{AUTOSPLIT_VERSION}") - # Spinbox frame disappears and reappears on Windows 11. It's much cleaner to just disable them. 
- # Most likely related: https://bugreports.qt.io/browse/QTBUG-95215?jql=labels%20%3D%20Windows11 - # Arrow buttons tend to move a lot as well - if WINDOWS_BUILD_NUMBER >= FIRST_WIN_11_BUILD: - self.x_spinbox.setFrame(False) - self.y_spinbox.setFrame(False) - self.width_spinbox.setFrame(False) - self.height_spinbox.setFrame(False) # Hotkeys need to be initialized to be passed as thread arguments in hotkeys.py for hotkey in HOTKEYS: @@ -146,10 +135,7 @@ def __init__(self, parent: QWidget | None = None): # noqa: PLR0915 print(f"{AUTOSPLIT_VERSION}\n{os.getpid()}", flush=True) # Use and Start the thread that checks for updates from LiveSplit - self.update_auto_control = QtCore.QThread() - worker = AutoControlledWorker(self) - worker.moveToThread(self.update_auto_control) - self.update_auto_control.started.connect(worker.run) + self.update_auto_control = AutoControlledThread(self) self.update_auto_control.start() # split image folder line edit text @@ -162,23 +148,10 @@ def __init__(self, parent: QWidget | None = None): # noqa: PLR0915 self.action_about_qt_for_python.triggered.connect(about_qt_for_python) self.action_check_for_updates.triggered.connect(lambda: check_for_updates(self)) self.action_settings.triggered.connect(lambda: open_settings(self)) - # PyQt6 typing is wrong - self.action_save_profile.triggered.connect( - lambda: user_profile.save_settings(self), # pyright: ignore[reportGeneralTypeIssues] - ) - self.action_save_profile_as.triggered.connect( - lambda: user_profile.save_settings_as(self), # pyright: ignore[reportGeneralTypeIssues] - ) + self.action_save_profile.triggered.connect(lambda: user_profile.save_settings(self)) + self.action_save_profile_as.triggered.connect(lambda: user_profile.save_settings_as(self)) self.action_load_profile.triggered.connect(lambda: user_profile.load_settings(self)) - # Shortcut context can't be set through the designer because of a bug in pyuic6 that generates invalid code - # Email sent to pyqt@riverbankcomputing.com - self.action_view_help.setShortcutContext(QtCore.Qt.ShortcutContext.ApplicationShortcut) - self.action_settings.setShortcutContext(QtCore.Qt.ShortcutContext.ApplicationShortcut) - self.action_save_profile.setShortcutContext(QtCore.Qt.ShortcutContext.ApplicationShortcut) - self.action_save_profile_as.setShortcutContext(QtCore.Qt.ShortcutContext.ApplicationShortcut) - self.action_load_profile.setShortcutContext(QtCore.Qt.ShortcutContext.ApplicationShortcut) - # Connecting button clicks to functions self.browse_button.clicked.connect(self.__browse) self.select_region_button.clicked.connect(lambda: select_region(self)) @@ -206,12 +179,12 @@ def __init__(self, parent: QWidget | None = None): # noqa: PLR0915 # connect signals to functions self.after_setting_hotkey_signal.connect(lambda: after_setting_hotkey(self)) self.start_auto_splitter_signal.connect(self.__auto_splitter) - self.update_checker_widget_signal.connect( - lambda latest_version, check_on_open: open_update_checker(self, latest_version, check_on_open), - ) + + def _update_checker_widget_signal_slot(latest_version: str, check_on_open: bool): + return open_update_checker(self, latest_version, check_on_open) + self.update_checker_widget_signal.connect(_update_checker_widget_signal_slot) + self.load_start_image_signal.connect(self.__load_start_image) - self.load_start_image_signal[bool].connect(self.__load_start_image) - self.load_start_image_signal[bool, bool].connect(self.__load_start_image) self.reset_signal.connect(self.reset) self.skip_split_signal.connect(self.skip_split) 
self.undo_split_signal.connect(self.undo_split) @@ -254,12 +227,14 @@ def __browse(self): # set the split image folder line to the directory text self.settings_dict["split_image_directory"] = new_split_image_directory self.split_image_folder_input.setText(f"{new_split_image_directory}/") - self.load_start_image_signal.emit() + self.load_start_image_signal.emit(False, True) def __update_live_image_details(self, capture: cv2.Mat | None, called_from_timer: bool = False): # HACK: Since this is also called in __get_capture_for_comparison, # we don't need to update anything if the app is running - if called_from_timer and not (self.is_running or self.start_image): + if called_from_timer: + if self.is_running or self.start_image: + return capture, _ = self.capture_method.get_frame(self) # Update title from target window or Capture Device name @@ -271,9 +246,9 @@ def __update_live_image_details(self, capture: cv2.Mat | None, called_from_timer # Simply clear if "live capture region" setting is off if not (self.settings_dict["live_capture_region"] and capture_region_window_label): self.live_image.clear() - return - - set_preview_image(self.live_image, capture, False) + # Set live image in UI + else: + set_preview_image(self.live_image, capture, False) def __load_start_image(self, started_by_button: bool = False, wait_for_delay: bool = True): """Not thread safe (if triggered by LiveSplit for example). Use `load_start_image_signal.emit` instead.""" @@ -450,7 +425,7 @@ def __is_current_split_out_of_range(self): or self.split_image_number > len(self.split_images_and_loop_number) - 1 def undo_split(self, navigate_image_only: bool = False): - """"Undo Split" and "Prev. Img." buttons connect to here.""" + """Undo Split" and "Prev. Img." buttons connect to here.""" # Can't undo until timer is started # or Undoing past the first image if not self.is_running \ @@ -472,7 +447,7 @@ def undo_split(self, navigate_image_only: bool = False): send_command(self, "undo") def skip_split(self, navigate_image_only: bool = False): - """"Skip Split" and "Next Img." buttons connect to here.""" + """Skip Split" and "Next Img." 
buttons connect to here.""" # Can't skip or split until timer is started # or Splitting/skipping when there are no images left if not self.is_running \ @@ -518,7 +493,7 @@ def start_auto_splitter(self): self.start_auto_splitter_signal.emit() def __check_for_reset_state_update_ui(self): - """Check if AutoSplit is started, if not either restart (loop splits) or update the GUI.""" + """Check if AutoSplit is started, if not then update the GUI.""" if not self.is_running: self.gui_changes_on_reset(True) return True @@ -792,7 +767,7 @@ def gui_changes_on_reset(self, safe_to_reload_start_image: bool = False): QApplication.processEvents() if safe_to_reload_start_image: - self.load_start_image_signal[bool, bool].emit(False, False) + self.load_start_image_signal.emit(False, False) def __get_capture_for_comparison(self): """Grab capture region and resize for comparison.""" @@ -875,15 +850,15 @@ def __update_split_image(self, specific_image: AutoSplitImage | None = None): self.image_loop_value_label.setText(f"{loop_tuple[1]}/{loop_tuple[0].loops}") @override - def closeEvent(self, a0: QtGui.QCloseEvent | None = None): + def closeEvent(self, event: QtGui.QCloseEvent | None = None): """Exit safely when closing the window.""" def exit_program() -> NoReturn: if self.update_auto_control: self.update_auto_control.terminate() self.capture_method.close(self) - if a0 is not None: - a0.accept() + if event is not None: + event.accept() if self.is_auto_controlled: # stop main thread (which is probably blocked reading input) via an interrupt signal os.kill(os.getpid(), signal.SIGINT) @@ -892,7 +867,7 @@ def exit_program() -> NoReturn: # Simulates LiveSplit quitting without asking. See "TODO" at update_auto_control Worker # This also more gracefully exits LiveSplit # Users can still manually save their settings - if a0 is None: + if event is None: exit_program() if user_profile.have_settings_changed(self): @@ -913,11 +888,11 @@ def exit_program() -> NoReturn: if user_profile.save_settings(self): exit_program() else: - a0.ignore() + event.ignore() if warning is QMessageBox.StandardButton.No: exit_program() if warning is QMessageBox.StandardButton.Cancel: - a0.ignore() + event.ignore() else: exit_program() @@ -965,6 +940,9 @@ def is_already_open(): def main(): + # Best to call setStyle before the QApplication constructor + # https://doc.qt.io/qt-6/qapplication.html#setStyle-1 + QApplication.setStyle("fusion") # Call to QApplication outside the try-except so we can show error messages app = QApplication(sys.argv) try: diff --git a/src/capture_method/BitBltCaptureMethod.py b/src/capture_method/BitBltCaptureMethod.py index ed7b4e7f..c65dda80 100644 --- a/src/capture_method/BitBltCaptureMethod.py +++ b/src/capture_method/BitBltCaptureMethod.py @@ -22,6 +22,14 @@ class BitBltCaptureMethod(CaptureMethodBase): + name = "BitBlt" + short_description = "fastest, least compatible" + description = ( + "\nThe best option when compatible. But it cannot properly record " + + "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. " + + "\nThe smaller the selected region, the more efficient it is. 
" + ) + _render_full_content = False def get_frame(self, autosplit: AutoSplit) -> tuple[cv2.Mat | None, bool]: diff --git a/src/capture_method/CaptureMethodBase.py b/src/capture_method/CaptureMethodBase.py index b391fe45..bc078b50 100644 --- a/src/capture_method/CaptureMethodBase.py +++ b/src/capture_method/CaptureMethodBase.py @@ -11,6 +11,10 @@ class CaptureMethodBase(): + name = "None" + short_description = "" + description = "" + def __init__(self, autosplit: AutoSplit | None): # Some capture methods don't need an initialization process pass diff --git a/src/capture_method/DesktopDuplicationCaptureMethod.py b/src/capture_method/DesktopDuplicationCaptureMethod.py index c7742cff..da81bc46 100644 --- a/src/capture_method/DesktopDuplicationCaptureMethod.py +++ b/src/capture_method/DesktopDuplicationCaptureMethod.py @@ -9,13 +9,25 @@ from win32 import win32gui from capture_method.BitBltCaptureMethod import BitBltCaptureMethod -from utils import get_window_bounds +from utils import GITHUB_REPOSITORY, get_window_bounds if TYPE_CHECKING: from AutoSplit import AutoSplit class DesktopDuplicationCaptureMethod(BitBltCaptureMethod): + name = "Direct3D Desktop Duplication" + short_description = "slower, bound to display" + description = ( + "\nDuplicates the desktop using Direct3D. " + + "\nIt can record OpenGL and Hardware Accelerated windows. " + + "\nAbout 10-15x slower than BitBlt. Not affected by window size. " + + "\nOverlapping windows will show up and can't record across displays. " + + "\nThis option may not be available for hybrid GPU laptops, " + + "\nsee D3DDD-Note-Laptops.md for a solution. " + + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method " + ) + def __init__(self, autosplit: AutoSplit | None): super().__init__(autosplit) # Must not set statically as some laptops will throw an error diff --git a/src/capture_method/ForceFullContentRenderingCaptureMethod.py b/src/capture_method/ForceFullContentRenderingCaptureMethod.py index 384ef027..6bbcd70e 100644 --- a/src/capture_method/ForceFullContentRenderingCaptureMethod.py +++ b/src/capture_method/ForceFullContentRenderingCaptureMethod.py @@ -4,4 +4,12 @@ class ForceFullContentRenderingCaptureMethod(BitBltCaptureMethod): + name = "Force Full Content Rendering" + short_description = "very slow, can affect rendering" + description = ( + "\nUses BitBlt behind the scene, but passes a special flag " + + "\nto PrintWindow to force rendering the entire desktop. " + + "\nAbout 10-15x slower than BitBlt based on original window size " + + "\nand can mess up some applications' rendering pipelines. " + ) _render_full_content = True diff --git a/src/capture_method/VideoCaptureDeviceCaptureMethod.py b/src/capture_method/VideoCaptureDeviceCaptureMethod.py index defd2727..e557ef48 100644 --- a/src/capture_method/VideoCaptureDeviceCaptureMethod.py +++ b/src/capture_method/VideoCaptureDeviceCaptureMethod.py @@ -14,17 +14,24 @@ if TYPE_CHECKING: from AutoSplit import AutoSplit -OBS_CAMERA_BLANK_PIXEL = [127, 129, 128] +OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL = [127, 129, 128] def is_blank(image: cv2.Mat): # Running np.all on the entire array or looping manually through the # entire array is extremely slow when we can't stop early. 
# Instead we check for a few key pixels, in this case, corners - return np.all(image[::image.shape[0] - 1, ::image.shape[1] - 1] == OBS_CAMERA_BLANK_PIXEL) + return np.all(image[::image.shape[0] - 1, ::image.shape[1] - 1] == OBS_VIRTUALCAM_PLUGIN_BLANK_PIXEL) class VideoCaptureDeviceCaptureMethod(CaptureMethodBase): + name = "Video Capture Device" + short_description = "see below" + description = ( + "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. " + + "\nYou can select one below. " + ) + capture_device: cv2.VideoCapture capture_thread: Thread | None stop_thread: Event @@ -118,8 +125,5 @@ def get_frame(self, autosplit: AutoSplit): ] return cv2.cvtColor(image, cv2.COLOR_BGR2BGRA), is_old_image - def recover_window(self, captured_window_title: str, autosplit: AutoSplit) -> bool: - raise NotImplementedError - def check_selected_region_exists(self, autosplit: AutoSplit): return bool(self.capture_device.isOpened()) diff --git a/src/capture_method/WindowsGraphicsCaptureMethod.py b/src/capture_method/WindowsGraphicsCaptureMethod.py index 68315f8e..e38f7957 100644 --- a/src/capture_method/WindowsGraphicsCaptureMethod.py +++ b/src/capture_method/WindowsGraphicsCaptureMethod.py @@ -13,15 +13,28 @@ from winsdk.windows.graphics.imaging import BitmapBufferAccessMode, SoftwareBitmap from capture_method.CaptureMethodBase import CaptureMethodBase -from utils import RGBA_CHANNEL_COUNT, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd +from utils import RGBA_CHANNEL_COUNT, WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, get_direct3d_device, is_valid_hwnd if TYPE_CHECKING: from AutoSplit import AutoSplit WGC_NO_BORDER_MIN_BUILD = 20348 +LEARNING_MODE_DEVICE_BUILD = 17763 +"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice""" class WindowsGraphicsCaptureMethod(CaptureMethodBase): + name = "Windows Graphics Capture" + short_description = "fast, most compatible, capped at 60fps" + description = ( + f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. " + + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}" + + "\nrequire having at least one audio or video Capture Device connected and enabled." + + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. " + + "\nAdds a yellow border on Windows 10 (not on Windows 11)." + + "\nCaps at around 60 FPS. 
" + ) + size: SizeInt32 frame_pool: Direct3D11CaptureFramePool | None = None session: GraphicsCaptureSession | None = None diff --git a/src/capture_method/__init__.py b/src/capture_method/__init__.py index cd2c2553..31c3f036 100644 --- a/src/capture_method/__init__.py +++ b/src/capture_method/__init__.py @@ -15,16 +15,11 @@ from capture_method.ForceFullContentRenderingCaptureMethod import ForceFullContentRenderingCaptureMethod from capture_method.VideoCaptureDeviceCaptureMethod import VideoCaptureDeviceCaptureMethod from capture_method.WindowsGraphicsCaptureMethod import WindowsGraphicsCaptureMethod -from utils import GITHUB_REPOSITORY, WINDOWS_BUILD_NUMBER, first, try_get_direct3d_device +from utils import WGC_MIN_BUILD, WINDOWS_BUILD_NUMBER, first, try_get_direct3d_device if TYPE_CHECKING: from AutoSplit import AutoSplit -WGC_MIN_BUILD = 17134 -"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to""" -LEARNING_MODE_DEVICE_BUILD = 17763 -"""https://learn.microsoft.com/en-us/uwp/api/windows.ai.machinelearning.learningmodeldevice""" - class Region(TypedDict): x: int @@ -33,14 +28,6 @@ class Region(TypedDict): height: int -@dataclass -class CaptureMethodInfo(): - name: str - short_description: str - description: str - implementation: type[CaptureMethodBase] - - class CaptureMethodMeta(EnumMeta): # Allow checking if simple string is enum def __contains__(self, other: str): @@ -74,7 +61,7 @@ def __hash__(self): VIDEO_CAPTURE_DEVICE = "VIDEO_CAPTURE_DEVICE" -class CaptureMethodDict(OrderedDict[CaptureMethodEnum, CaptureMethodInfo]): +class CaptureMethodDict(OrderedDict[CaptureMethodEnum, type[CaptureMethodBase]]): def get_index(self, capture_method: str | CaptureMethodEnum): """Returns 0 if the capture_method is invalid or unsupported.""" try: @@ -99,99 +86,37 @@ def get_method_by_index(self, index: int): def get(self, __key: CaptureMethodEnum): """ - Returns the `CaptureMethodInfo` for `CaptureMethodEnum` if `CaptureMethodEnum` is available, + Returns the `CaptureMethodBase` subclass for `CaptureMethodEnum` if `CaptureMethodEnum` is available, else defaults to the first available `CaptureMethodEnum`. - Returns the `CaptureMethodBase` (default) implementation if there's no capture methods. + Returns `CaptureMethodBase` (default) directly if there's no capture methods. """ if __key == CaptureMethodEnum.NONE or len(self) <= 0: - return NONE_CAPTURE_METHOD + return CaptureMethodBase return super().get(__key, first(self.values())) -NONE_CAPTURE_METHOD = CaptureMethodInfo( - name="None", - short_description="", - description="", - implementation=CaptureMethodBase, -) - CAPTURE_METHODS = CaptureMethodDict() if ( # Windows Graphics Capture requires a minimum Windows Build WINDOWS_BUILD_NUMBER >= WGC_MIN_BUILD # Our current implementation of Windows Graphics Capture does not ensure we can get an ID3DDevice and try_get_direct3d_device() ): - CAPTURE_METHODS[CaptureMethodEnum.WINDOWS_GRAPHICS_CAPTURE] = CaptureMethodInfo( - name="Windows Graphics Capture", - short_description="fast, most compatible, capped at 60fps", - description=( - f"\nOnly available in Windows 10.0.{WGC_MIN_BUILD} and up. " - + f"\nDue to current technical limitations, Windows versions below 10.0.0.{LEARNING_MODE_DEVICE_BUILD}" - + "\nrequire having at least one audio or video Capture Device connected and enabled." - + "\nAllows recording UWP apps, Hardware Accelerated and Exclusive Fullscreen windows. " - + "\nAdds a yellow border on Windows 10 (not on Windows 11)." 
- + "\nCaps at around 60 FPS. " - ), - implementation=WindowsGraphicsCaptureMethod, - ) -CAPTURE_METHODS[CaptureMethodEnum.BITBLT] = CaptureMethodInfo( - name="BitBlt", - short_description="fastest, least compatible", - description=( - "\nThe best option when compatible. But it cannot properly record " - + "\nOpenGL, Hardware Accelerated or Exclusive Fullscreen windows. " - + "\nThe smaller the selected region, the more efficient it is. " - ), - - implementation=BitBltCaptureMethod, -) + CAPTURE_METHODS[CaptureMethodEnum.WINDOWS_GRAPHICS_CAPTURE] = WindowsGraphicsCaptureMethod +CAPTURE_METHODS[CaptureMethodEnum.BITBLT] = BitBltCaptureMethod try: # Test for laptop cross-GPU Desktop Duplication issue import d3dshot d3dshot.create(capture_output="numpy") except (ModuleNotFoundError, COMError): pass else: - CAPTURE_METHODS[CaptureMethodEnum.DESKTOP_DUPLICATION] = CaptureMethodInfo( - name="Direct3D Desktop Duplication", - short_description="slower, bound to display", - description=( - "\nDuplicates the desktop using Direct3D. " - + "\nIt can record OpenGL and Hardware Accelerated windows. " - + "\nAbout 10-15x slower than BitBlt. Not affected by window size. " - + "\nOverlapping windows will show up and can't record across displays. " - + "\nThis option may not be available for hybrid GPU laptops, " - + "\nsee /docs/D3DDD-Note-Laptops.md for a solution. " - + f"\nhttps://www.github.com/{GITHUB_REPOSITORY}#capture-method " - ), - implementation=DesktopDuplicationCaptureMethod, - ) -CAPTURE_METHODS[CaptureMethodEnum.PRINTWINDOW_RENDERFULLCONTENT] = CaptureMethodInfo( - name="Force Full Content Rendering", - short_description="very slow, can affect rendering", - description=( - "\nUses BitBlt behind the scene, but passes a special flag " - + "\nto PrintWindow to force rendering the entire desktop. " - + "\nAbout 10-15x slower than BitBlt based on original window size " - + "\nand can mess up some applications' rendering pipelines. " - ), - implementation=ForceFullContentRenderingCaptureMethod, -) -CAPTURE_METHODS[CaptureMethodEnum.VIDEO_CAPTURE_DEVICE] = CaptureMethodInfo( - name="Video Capture Device", - short_description="see below", - description=( - "\nUses a Video Capture Device, like a webcam, virtual cam, or capture card. " - + "\nYou can select one below. 
" - + "\nIf you want to use this with OBS' Virtual Camera, use the Virtualcam plugin instead " - + "\nhttps://github.com/Avasam/obs-virtual-cam/releases" - ), - implementation=VideoCaptureDeviceCaptureMethod, -) + CAPTURE_METHODS[CaptureMethodEnum.DESKTOP_DUPLICATION] = DesktopDuplicationCaptureMethod +CAPTURE_METHODS[CaptureMethodEnum.PRINTWINDOW_RENDERFULLCONTENT] = ForceFullContentRenderingCaptureMethod +CAPTURE_METHODS[CaptureMethodEnum.VIDEO_CAPTURE_DEVICE] = VideoCaptureDeviceCaptureMethod def change_capture_method(selected_capture_method: CaptureMethodEnum, autosplit: AutoSplit): autosplit.capture_method.close(autosplit) - autosplit.capture_method = CAPTURE_METHODS.get(selected_capture_method).implementation(autosplit) + autosplit.capture_method = CAPTURE_METHODS.get(selected_capture_method)(autosplit) if selected_capture_method == CaptureMethodEnum.VIDEO_CAPTURE_DEVICE: autosplit.select_region_button.setDisabled(True) autosplit.select_window_button.setDisabled(True) @@ -209,11 +134,17 @@ class CameraInfo(): resolution: tuple[int, int] +def get_input_devices(): + """https://github.com/andreaschiavinato/python_grabber/pull/24 .""" + return cast(list[str], FilterGraph().get_input_devices()) + + def get_input_device_resolution(index: int): filter_graph = FilterGraph() try: filter_graph.add_video_input_device(index) - # This can happen since OBS 29.1 DLL blocking breaking VirtualCam + # This can happen with virtual cameras throwing errors. + # For example since OBS 29.1 updated FFMPEG breaking VirtualCam 3.0 # https://github.com/Toufool/AutoSplit/issues/238 except COMError: return None @@ -223,8 +154,7 @@ def get_input_device_resolution(index: int): async def get_all_video_capture_devices() -> list[CameraInfo]: - # TODO: Fix partially Unknown list upstream - named_video_inputs: list[str] = FilterGraph().get_input_devices() + named_video_inputs = get_input_devices() async def get_camera_info(index: int, device_name: str): backend = "" @@ -250,13 +180,11 @@ async def get_camera_info(index: int, device_name: str): if resolution is not None \ else None + # Note: Return type required https://github.com/python/typeshed/issues/2652 future = asyncio.gather( *[ get_camera_info(index, name) for index, name in enumerate(named_video_inputs) - # Will crash when trying to resize, and does not work to begin with - # TODO: Should be fixed in next release of OpenCV (4.8) - if name != "OBS Virtual Camera" ], ) diff --git a/src/error_messages.py b/src/error_messages.py index 8eb6c9a0..63462140 100644 --- a/src/error_messages.py +++ b/src/error_messages.py @@ -8,7 +8,7 @@ from types import TracebackType from typing import TYPE_CHECKING, NoReturn -from PyQt6 import QtCore, QtWidgets +from PySide6 import QtCore, QtWidgets from utils import FROZEN, GITHUB_REPOSITORY @@ -177,7 +177,7 @@ def excepthook(exception_type: type[BaseException], exception: BaseException, _t # HACK: Can happen when starting the region selector while capturing with WindowsGraphicsCapture if ( exception_type is SystemError - and str(exception) == " returned a result with an error set" + and str(exception) == " returned a result with an error set" ): return # Whithin LiveSplit excepthook needs to use MainWindow's signals to show errors diff --git a/src/hotkeys.py b/src/hotkeys.py index e0fe3c65..b0575467 100644 --- a/src/hotkeys.py +++ b/src/hotkeys.py @@ -5,7 +5,7 @@ import keyboard import pyautogui -from PyQt6 import QtWidgets +from PySide6 import QtWidgets import error_messages from utils import fire_and_forget, is_digit @@ -105,7 
+105,7 @@ def __validate_keypad(expected_key: str, keyboard_event: keyboard.KeyboardEvent) NOTE: This is a workaround very specific to numpads. Windows reports different physical keys with the same scan code. For example, "Home", "Num Home" and "Num 7" are all `71`. - See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684. + See: https://github.com/boppreh/keyboard/issues/171#issuecomment-390437684 . Since we reuse the key string we set to send to LiveSplit, we can't use fake names like "num home". We're also trying to achieve the same hotkey behaviour as LiveSplit has. @@ -153,7 +153,7 @@ def __get_key_name(keyboard_event: keyboard.KeyboardEvent): def __get_hotkey_name(names: list[str]): """ Uses keyboard.get_hotkey_name but works with non-english modifiers and keypad - See: https://github.com/boppreh/keyboard/issues/516. + See: https://github.com/boppreh/keyboard/issues/516 . """ def sorting_key(key: str): return not keyboard.is_modifier(keyboard.key_to_scan_codes(key)[0]) diff --git a/src/menu_bar.py b/src/menu_bar.py index a37a9a75..19b6b549 100644 --- a/src/menu_bar.py +++ b/src/menu_bar.py @@ -6,7 +6,7 @@ import requests from packaging.version import parse as version_parse -from PyQt6 import QtCore, QtWidgets +from PySide6 import QtCore, QtWidgets from requests.exceptions import RequestException import error_messages @@ -18,16 +18,9 @@ change_capture_method, get_all_video_capture_devices, ) -from gen import about, design, resources_rc, settings as settings_ui, update_checker # noqa: F401 +from gen import about, design, settings as settings_ui, update_checker from hotkeys import HOTKEYS, Hotkey, set_hotkey -from utils import ( - AUTOSPLIT_VERSION, - FIRST_WIN_11_BUILD, - GITHUB_REPOSITORY, - WINDOWS_BUILD_NUMBER, - decimal, - fire_and_forget, -) +from utils import AUTOSPLIT_VERSION, GITHUB_REPOSITORY, decimal, fire_and_forget if TYPE_CHECKING: from AutoSplit import AutoSplit @@ -113,7 +106,6 @@ def about_qt(): def about_qt_for_python(): webbrowser.open("https://wiki.qt.io/Qt_for_Python") - webbrowser.open("https://www.riverbankcomputing.com/software/pyqt") def check_for_updates(autosplit: AutoSplit, check_on_open: bool = False): @@ -205,16 +197,6 @@ def __set_all_capture_devices(self): else: self.capture_device_combobox.setPlaceholderText("No device found.") - def __apply_os_specific_ui_fixes(self): - # Spinbox frame disappears and reappears on Windows 11. It's much cleaner to just disable them. - # Most likely related: https://bugreports.qt.io/browse/QTBUG-95215?jql=labels%20%3D%20Windows11 - # Arrow buttons tend to move a lot as well - if WINDOWS_BUILD_NUMBER >= FIRST_WIN_11_BUILD: - self.fps_limit_spinbox.setFrame(False) - self.default_similarity_threshold_spinbox.setFrame(False) - self.default_delay_time_spinbox.setFrame(False) - self.default_pause_time_spinbox.setFrame(False) - def __set_readme_link(self): self.custom_image_settings_info_label.setText( self.custom_image_settings_info_label @@ -224,10 +206,7 @@ def __set_readme_link(self): # HACK: This is a workaround because custom_image_settings_info_label # simply will not open links with a left click no matter what we tried. 
self.readme_link_button.clicked.connect( - # PyQt6 typing is wrong - lambda: webbrowser.open( # pyright: ignore[reportGeneralTypeIssues] - f"https://github.com/{GITHUB_REPOSITORY}#readme", - ), + lambda: webbrowser.open(f"https://github.com/{GITHUB_REPOSITORY}#readme"), ) self.readme_link_button.setStyleSheet("border: 0px; background-color:rgba(0,0,0,0%);") @@ -235,7 +214,6 @@ def __init__(self, autosplit: AutoSplit): super().__init__() self.setupUi(self) self.autosplit = autosplit - self.__apply_os_specific_ui_fixes() self.__set_readme_link() # Don't autofocus any particular field self.setFocus() @@ -244,21 +222,18 @@ def __init__(self, autosplit: AutoSplit): # region Build the Capture method combobox capture_method_values = CAPTURE_METHODS.values() self.__set_all_capture_devices() - capture_list_items = [ + + # TODO: Word-wrapping works, but there's lots of extra padding to the right. Raise issue upstream + # list_view = QtWidgets.QListView() + # list_view.setWordWrap(True) + # list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) + # list_view.setFixedWidth(self.capture_method_combobox.width()) + # self.capture_method_combobox.setView(list_view) + + self.capture_method_combobox.addItems([ f"- {method.name} ({method.short_description})" for method in capture_method_values - ] - list_view = QtWidgets.QListView() - list_view.setWordWrap(True) - # HACK: The first time the dropdown is rendered, it does not have the right height - # Assuming all options take 2 lines (except camera and BitBlt which have 1). - # And all lines take 16 pixels - # And all separators take 2 pixels - doubled_len = 2 * len(capture_method_values) or 2 - list_view.setMinimumHeight((doubled_len - 2) * 16 + doubled_len) - list_view.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOff) - self.capture_method_combobox.setView(list_view) - self.capture_method_combobox.addItems(capture_list_items) + ]) self.capture_method_combobox.setToolTip( "\n\n".join([ f"{method.name} :\n{method.description}" diff --git a/src/region_selection.py b/src/region_selection.py index c5e18dbe..816f0976 100644 --- a/src/region_selection.py +++ b/src/region_selection.py @@ -8,25 +8,18 @@ import cv2 import numpy as np -from PyQt6 import QtCore, QtGui, QtWidgets -from PyQt6.QtTest import QTest +from PySide6 import QtCore, QtGui, QtWidgets +from PySide6.QtTest import QTest +from pywinctl import getTopWindowAt from typing_extensions import override from win32 import win32gui from win32con import SM_CXVIRTUALSCREEN, SM_CYVIRTUALSCREEN, SM_XVIRTUALSCREEN, SM_YVIRTUALSCREEN -from winsdk._winrt import initialize_with_window # pylint: disable=no-name-in-module +from winsdk._winrt import initialize_with_window from winsdk.windows.foundation import AsyncStatus, IAsyncOperation from winsdk.windows.graphics.capture import GraphicsCaptureItem, GraphicsCapturePicker import error_messages -from utils import ( - MAXBYTE, - RGB_CHANNEL_COUNT, - ImageShape, - get_window_bounds, - getTopWindowAt, - is_valid_hwnd, - is_valid_image, -) +from utils import MAXBYTE, RGB_CHANNEL_COUNT, ImageShape, get_window_bounds, is_valid_hwnd, is_valid_image user32 = ctypes.windll.user32 @@ -319,8 +312,8 @@ def __init__(self): self.show() @override - def keyPressEvent(self, a0: QtGui.QKeyEvent): - if a0.key() == QtCore.Qt.Key.Key_Escape: + def keyPressEvent(self, event: QtGui.QKeyEvent): + if event.key() == QtCore.Qt.Key.Key_Escape: self.close() @@ -328,16 +321,16 @@ class SelectWindowWidget(BaseSelectWidget): """Widget to select a window 
and obtain its bounds.""" @override - def mouseReleaseEvent(self, a0: QtGui.QMouseEvent): - self._x = int(a0.position().x()) + self.geometry().x() - self._y = int(a0.position().y()) + self.geometry().y() + def mouseReleaseEvent(self, event: QtGui.QMouseEvent): + self._x = int(event.position().x()) + self.geometry().x() + self._y = int(event.position().y()) + self.geometry().y() self.close() class SelectRegionWidget(BaseSelectWidget): """ Widget for dragging screen region - https://github.com/harupy/snipping-tool. + Originated from https://github.com/harupy/snipping-tool . """ _right: int = 0 @@ -358,7 +351,7 @@ def width(self): return self._right - self._x @override - def paintEvent(self, a0: QtGui.QPaintEvent): + def paintEvent(self, event: QtGui.QPaintEvent): if self.__begin != self.__end: qpainter = QtGui.QPainter(self) qpainter.setPen(QtGui.QPen(QtGui.QColor("red"), BORDER_WIDTH)) @@ -366,18 +359,18 @@ def paintEvent(self, a0: QtGui.QPaintEvent): qpainter.drawRect(QtCore.QRect(self.__begin, self.__end)) @override - def mousePressEvent(self, a0: QtGui.QMouseEvent): - self.__begin = a0.position().toPoint() + def mousePressEvent(self, event: QtGui.QMouseEvent): + self.__begin = event.position().toPoint() self.__end = self.__begin self.update() @override - def mouseMoveEvent(self, a0: QtGui.QMouseEvent): - self.__end = a0.position().toPoint() + def mouseMoveEvent(self, event: QtGui.QMouseEvent): + self.__end = event.position().toPoint() self.update() @override - def mouseReleaseEvent(self, a0: QtGui.QMouseEvent): + def mouseReleaseEvent(self, event: QtGui.QMouseEvent): if self.__begin != self.__end: # The coordinates are pulled relative to the top left of the set geometry, # so the added virtual screen offsets convert them back to the virtual screen coordinates diff --git a/src/user_profile.py b/src/user_profile.py index 9be788d6..8cf5075f 100644 --- a/src/user_profile.py +++ b/src/user_profile.py @@ -4,7 +4,7 @@ from typing import TYPE_CHECKING, TypedDict, cast import toml -from PyQt6 import QtCore, QtWidgets +from PySide6 import QtCore, QtWidgets import error_messages from capture_method import CAPTURE_METHODS, CaptureMethodEnum, Region, change_capture_method @@ -84,7 +84,6 @@ def save_settings_as(autosplit: AutoSplit): or os.path.join(auto_split_directory, "settings.toml"), "TOML (*.toml)", )[0] - # If user cancels save destination window, don't save settings if not save_settings_file_path: return "" @@ -164,7 +163,7 @@ def load_settings(autosplit: AutoSplit, from_path: str = ""): autosplit.last_successfully_loaded_settings_file_path = load_settings_file_path # TODO: Should this check be in `__load_start_image` ? 
if not autosplit.is_running: - autosplit.load_start_image_signal.emit() + autosplit.load_start_image_signal.emit(False, True) def load_settings_on_open(autosplit: AutoSplit): @@ -196,7 +195,9 @@ def load_check_for_updates_on_open(autosplit: AutoSplit): value = QtCore \ .QSettings("AutoSplit", "Check For Updates On Open") \ .value("check_for_updates_on_open", True, type=bool) - autosplit.action_check_for_updates_on_open.setChecked(value) + # Type not infered by PySide6 + # TODO: Report this issue upstream + autosplit.action_check_for_updates_on_open.setChecked(value) # pyright: ignore[reportGeneralTypeIssues] def set_check_for_updates_on_open(design_window: design.Ui_MainWindow, value: bool): diff --git a/src/utils.py b/src/utils.py index dd374a74..de827fb2 100644 --- a/src/utils.py +++ b/src/utils.py @@ -152,7 +152,8 @@ def fire_and_forget(func: Callable[..., Any]): """ Runs synchronous function asynchronously without waiting for a response. - Uses threads on Windows because `RuntimeError: There is no current event loop in thread 'MainThread'.` + Uses threads on Windows because ~~`RuntimeError: There is no current event loop in thread 'MainThread'.`~~ + Because maybe asyncio has issues. Unsure. See alpha.5 and https://github.com/Avasam/AutoSplit/issues/36 Uses asyncio on Linux because of a `Segmentation fault (core dumped)` """ @@ -166,31 +167,12 @@ def wrapped(*args: Any, **kwargs: Any): return wrapped -def getTopWindowAt(x: int, y: int): # noqa: N802 - # Immitating PyWinCTL's function - class Win32Window(): - def __init__(self, hwnd: int) -> None: - self._hWnd = hwnd - - def getHandle(self): # noqa: N802 - return self._hWnd - - @property - def title(self): - return win32gui.GetWindowText(self._hWnd) - hwnd = win32gui.WindowFromPoint((x, y)) - - # Want to pull the parent window from the window handle - # By using GetAncestor we are able to get the parent window instead of the owner window. - while win32gui.IsChild(win32gui.GetParent(hwnd), hwnd): - hwnd = ctypes.windll.user32.GetAncestor(hwnd, 2) - return Win32Window(hwnd) if hwnd else None - - # Environment specifics WINDOWS_BUILD_NUMBER = int(version().split(".")[-1]) if sys.platform == "win32" else -1 FIRST_WIN_11_BUILD = 22000 """AutoSplit Version number""" +WGC_MIN_BUILD = 17134 +"""https://docs.microsoft.com/en-us/uwp/api/windows.graphics.capture.graphicscapturepicker#applies-to""" FROZEN = hasattr(sys, "frozen") """Running from build made by PyInstaller""" auto_split_directory = os.path.dirname(sys.executable if FROZEN else os.path.abspath(__file__)) @@ -198,5 +180,5 @@ def title(self): # Shared strings # Check `excludeBuildNumber` during workflow dispatch build generate a clean version number -AUTOSPLIT_VERSION = "2.0.1" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") +AUTOSPLIT_VERSION = "2.1.0" + (f"-{AUTOSPLIT_BUILD_NUMBER}" if AUTOSPLIT_BUILD_NUMBER else "") GITHUB_REPOSITORY = AUTOSPLIT_GITHUB_REPOSITORY diff --git a/typings/PyQt6/QtTest.pyi b/typings/PyQt6/QtTest.pyi deleted file mode 100644 index 83a6c435..00000000 --- a/typings/PyQt6/QtTest.pyi +++ /dev/null @@ -1,13 +0,0 @@ -import typing - -import PyQt6.sip - -# Email sent to pyqt@riverbankcomputing.com - - -class QTest(PyQt6.sip.simplewrapper): - @typing.overload - @staticmethod - def qWait(ms: int) -> None: ... - @typing.overload - def qWait(self, ms: int) -> None: ... 
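
The PyQt6 `QtTest` stub deleted above is no longer needed once the project runs on PySide6, which ships its own type information. For reference, a minimal sketch, assuming illustrative class and signal names (not taken from this patch), of the PySide6 `Signal`/`Slot`/`QThread` pattern that the Python changes in this diff adopt:

```python
# Sketch only: shows the PySide6 equivalents of PyQt6's pyqtSignal/pyqtSlot
# and a QThread subclass, as used by AutoControlledThread in this diff.
from PySide6 import QtCore


class ExampleThread(QtCore.QThread):
    # PySide6 uses Signal(...) where PyQt6 used pyqtSignal(...)
    progressed = QtCore.Signal(str)

    @QtCore.Slot()
    def run(self):
        # Work happens on the worker thread; results go back via signals
        self.progressed.emit("done")


if __name__ == "__main__":
    app = QtCore.QCoreApplication([])
    thread = ExampleThread()
    thread.progressed.connect(print)
    thread.finished.connect(app.quit)
    thread.start()
    app.exec()
```

Subclassing `QThread` and overriding `run()` (as `AutoControlledThread` does) avoids the separate worker-object-plus-`moveToThread` setup that the old `AutoControlledWorker` required.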
diff --git a/typings/cv2/__init__.pyi b/typings/cv2/__init__.pyi index 3a7ad7f6..30881037 100644 --- a/typings/cv2/__init__.pyi +++ b/typings/cv2/__init__.pyi @@ -1,3 +1,10 @@ +import typing + +import cv2.cuda +import cv2.gapi +import cv2.gapi.streaming +import cv2.typing +import numpy # noqa: ICN001 from cv2 import ( Error as Error, data as data, @@ -7,16 +14,11706 @@ from cv2 import ( utils as utils, version as version, ) -from cv2.cv2 import * # noqa: F403 -from cv2.mat_wrapper import Mat as WrappedMat, _NDArray from typing_extensions import TypeAlias -__all__: list[str] = [] +Mat: TypeAlias = cv2.typing.MatLike + + +class error(Exception): + code: typing.ClassVar[int] + err: typing.ClassVar[str] + file: typing.ClassVar[str] + func: typing.ClassVar[str] + line: typing.ClassVar[int] + msg: typing.ClassVar[str] + + +# Enumerations +SORT_EVERY_ROW: int +SORT_EVERY_COLUMN: int +SORT_ASCENDING: int +SORT_DESCENDING: int +SortFlags = int # One of [SORT_EVERY_ROW, SORT_EVERY_COLUMN, SORT_ASCENDING, SORT_DESCENDING] + +COVAR_SCRAMBLED: int +COVAR_NORMAL: int +COVAR_USE_AVG: int +COVAR_SCALE: int +COVAR_ROWS: int +COVAR_COLS: int +CovarFlags = int # One of [COVAR_SCRAMBLED, COVAR_NORMAL, COVAR_USE_AVG, COVAR_SCALE, COVAR_ROWS, COVAR_COLS] + +KMEANS_RANDOM_CENTERS: int +KMEANS_PP_CENTERS: int +KMEANS_USE_INITIAL_LABELS: int +KmeansFlags = int # One of [KMEANS_RANDOM_CENTERS, KMEANS_PP_CENTERS, KMEANS_USE_INITIAL_LABELS] + +REDUCE_SUM: int +REDUCE_AVG: int +REDUCE_MAX: int +REDUCE_MIN: int +REDUCE_SUM2: int +ReduceTypes = int # One of [REDUCE_SUM, REDUCE_AVG, REDUCE_MAX, REDUCE_MIN, REDUCE_SUM2] + +ROTATE_90_CLOCKWISE: int +ROTATE_180: int +ROTATE_90_COUNTERCLOCKWISE: int +RotateFlags = int # One of [ROTATE_90_CLOCKWISE, ROTATE_180, ROTATE_90_COUNTERCLOCKWISE] + +Param_INT: int +Param_BOOLEAN: int +Param_REAL: int +Param_STRING: int +Param_MAT: int +Param_MAT_VECTOR: int +Param_ALGORITHM: int +Param_FLOAT: int +Param_UNSIGNED_INT: int +Param_UINT64: int +Param_UCHAR: int +Param_SCALAR: int +# One of [INT, BOOLEAN, REAL, STRING, MAT, MAT_VECTOR, ALGORITHM, FLOAT, UNSIGNED_INT, UINT64, UCHAR, SCALAR] +Param = int + +DECOMP_LU: int +DECOMP_SVD: int +DECOMP_EIG: int +DECOMP_CHOLESKY: int +DECOMP_QR: int +DECOMP_NORMAL: int +DecompTypes = int # One of [DECOMP_LU, DECOMP_SVD, DECOMP_EIG, DECOMP_CHOLESKY, DECOMP_QR, DECOMP_NORMAL] + +NORM_INF: int +NORM_L1: int +NORM_L2: int +NORM_L2SQR: int +NORM_HAMMING: int +NORM_HAMMING2: int +NORM_TYPE_MASK: int +NORM_RELATIVE: int +NORM_MINMAX: int +# One of [NORM_INF, NORM_L1, NORM_L2, NORM_L2SQR, NORM_HAMMING, +# NORM_HAMMING2, NORM_TYPE_MASK, NORM_RELATIVE, NORM_MINMAX] +NormTypes = int + +CMP_EQ: int +CMP_GT: int +CMP_GE: int +CMP_LT: int +CMP_LE: int +CMP_NE: int +CmpTypes = int # One of [CMP_EQ, CMP_GT, CMP_GE, CMP_LT, CMP_LE, CMP_NE] + +GEMM_1_T: int +GEMM_2_T: int +GEMM_3_T: int +GemmFlags = int # One of [GEMM_1_T, GEMM_2_T, GEMM_3_T] + +DFT_INVERSE: int +DFT_SCALE: int +DFT_ROWS: int +DFT_COMPLEX_OUTPUT: int +DFT_REAL_OUTPUT: int +DFT_COMPLEX_INPUT: int +DCT_INVERSE: int +DCT_ROWS: int +# One of [DFT_INVERSE, DFT_SCALE, DFT_ROWS, DFT_COMPLEX_OUTPUT, +# DFT_REAL_OUTPUT, DFT_COMPLEX_INPUT, DCT_INVERSE, DCT_ROWS] +DftFlags = int + +BORDER_CONSTANT: int +BORDER_REPLICATE: int +BORDER_REFLECT: int +BORDER_WRAP: int +BORDER_REFLECT_101: int +BORDER_TRANSPARENT: int +BORDER_REFLECT101: int +BORDER_DEFAULT: int +BORDER_ISOLATED: int +# One of [BORDER_CONSTANT, BORDER_REPLICATE, BORDER_REFLECT, BORDER_WRAP, +# BORDER_REFLECT_101, BORDER_TRANSPARENT, 
BORDER_REFLECT101, +# BORDER_DEFAULT, BORDER_ISOLATED] +BorderTypes = int + +ACCESS_READ: int +ACCESS_WRITE: int +ACCESS_RW: int +ACCESS_MASK: int +ACCESS_FAST: int +AccessFlag = int # One of [ACCESS_READ, ACCESS_WRITE, ACCESS_RW, ACCESS_MASK, ACCESS_FAST] + +USAGE_DEFAULT: int +USAGE_ALLOCATE_HOST_MEMORY: int +USAGE_ALLOCATE_DEVICE_MEMORY: int +USAGE_ALLOCATE_SHARED_MEMORY: int +__UMAT_USAGE_FLAGS_32BIT: int +# One of [USAGE_DEFAULT, USAGE_ALLOCATE_HOST_MEMORY, +# USAGE_ALLOCATE_DEVICE_MEMORY, USAGE_ALLOCATE_SHARED_MEMORY, +# __UMAT_USAGE_FLAGS_32BIT] +UMatUsageFlags = int + +SOLVELP_UNBOUNDED: int +SOLVELP_UNFEASIBLE: int +SOLVELP_SINGLE: int +SOLVELP_MULTI: int +SolveLPResult = int # One of [SOLVELP_UNBOUNDED, SOLVELP_UNFEASIBLE, SOLVELP_SINGLE, SOLVELP_MULTI] + +QUAT_ASSUME_NOT_UNIT: int +QUAT_ASSUME_UNIT: int +QuatAssumeType = int # One of [QUAT_ASSUME_NOT_UNIT, QUAT_ASSUME_UNIT] + +FILTER_SCHARR: int +SpecialFilter = int # One of [FILTER_SCHARR] + +MORPH_ERODE: int +MORPH_DILATE: int +MORPH_OPEN: int +MORPH_CLOSE: int +MORPH_GRADIENT: int +MORPH_TOPHAT: int +MORPH_BLACKHAT: int +MORPH_HITMISS: int +# One of [MORPH_ERODE, MORPH_DILATE, MORPH_OPEN, MORPH_CLOSE, +# MORPH_GRADIENT, MORPH_TOPHAT, MORPH_BLACKHAT, MORPH_HITMISS] +MorphTypes = int + +MORPH_RECT: int +MORPH_CROSS: int +MORPH_ELLIPSE: int +MorphShapes = int # One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE] + +INTER_NEAREST: int +INTER_LINEAR: int +INTER_CUBIC: int +INTER_AREA: int +INTER_LANCZOS4: int +INTER_LINEAR_EXACT: int +INTER_NEAREST_EXACT: int +INTER_MAX: int +WARP_FILL_OUTLIERS: int +WARP_INVERSE_MAP: int +# One of [INTER_NEAREST, INTER_LINEAR, INTER_CUBIC, INTER_AREA, +# INTER_LANCZOS4, INTER_LINEAR_EXACT, INTER_NEAREST_EXACT, INTER_MAX, +# WARP_FILL_OUTLIERS, WARP_INVERSE_MAP] +InterpolationFlags = int + +WARP_POLAR_LINEAR: int +WARP_POLAR_LOG: int +WarpPolarMode = int # One of [WARP_POLAR_LINEAR, WARP_POLAR_LOG] + +INTER_BITS: int +INTER_BITS2: int +INTER_TAB_SIZE: int +INTER_TAB_SIZE2: int +InterpolationMasks = int # One of [INTER_BITS, INTER_BITS2, INTER_TAB_SIZE, INTER_TAB_SIZE2] + +DIST_USER: int +DIST_L1: int +DIST_L2: int +DIST_C: int +DIST_L12: int +DIST_FAIR: int +DIST_WELSCH: int +DIST_HUBER: int +DistanceTypes = int # One of [DIST_USER, DIST_L1, DIST_L2, DIST_C, DIST_L12, DIST_FAIR, DIST_WELSCH, DIST_HUBER] + +DIST_MASK_3: int +DIST_MASK_5: int +DIST_MASK_PRECISE: int +DistanceTransformMasks = int # One of [DIST_MASK_3, DIST_MASK_5, DIST_MASK_PRECISE] + +THRESH_BINARY: int +THRESH_BINARY_INV: int +THRESH_TRUNC: int +THRESH_TOZERO: int +THRESH_TOZERO_INV: int +THRESH_MASK: int +THRESH_OTSU: int +THRESH_TRIANGLE: int +# One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, +# THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE] +ThresholdTypes = int + +ADAPTIVE_THRESH_MEAN_C: int +ADAPTIVE_THRESH_GAUSSIAN_C: int +AdaptiveThresholdTypes = int # One of [ADAPTIVE_THRESH_MEAN_C, ADAPTIVE_THRESH_GAUSSIAN_C] + +GC_BGD: int +GC_FGD: int +GC_PR_BGD: int +GC_PR_FGD: int +GrabCutClasses = int # One of [GC_BGD, GC_FGD, GC_PR_BGD, GC_PR_FGD] + +GC_INIT_WITH_RECT: int +GC_INIT_WITH_MASK: int +GC_EVAL: int +GC_EVAL_FREEZE_MODEL: int +GrabCutModes = int # One of [GC_INIT_WITH_RECT, GC_INIT_WITH_MASK, GC_EVAL, GC_EVAL_FREEZE_MODEL] + +DIST_LABEL_CCOMP: int +DIST_LABEL_PIXEL: int +DistanceTransformLabelTypes = int # One of [DIST_LABEL_CCOMP, DIST_LABEL_PIXEL] + +FLOODFILL_FIXED_RANGE: int +FLOODFILL_MASK_ONLY: int +FloodFillFlags = int # One of [FLOODFILL_FIXED_RANGE, FLOODFILL_MASK_ONLY] + 
+CC_STAT_LEFT: int +CC_STAT_TOP: int +CC_STAT_WIDTH: int +CC_STAT_HEIGHT: int +CC_STAT_AREA: int +CC_STAT_MAX: int +# One of [CC_STAT_LEFT, CC_STAT_TOP, CC_STAT_WIDTH, CC_STAT_HEIGHT, CC_STAT_AREA, CC_STAT_MAX] +ConnectedComponentsTypes = int + +CCL_DEFAULT: int +CCL_WU: int +CCL_GRANA: int +CCL_BOLELLI: int +CCL_SAUF: int +CCL_BBDT: int +CCL_SPAGHETTI: int +# One of [CCL_DEFAULT, CCL_WU, CCL_GRANA, CCL_BOLELLI, CCL_SAUF, CCL_BBDT, CCL_SPAGHETTI] +ConnectedComponentsAlgorithmsTypes = int + +RETR_EXTERNAL: int +RETR_LIST: int +RETR_CCOMP: int +RETR_TREE: int +RETR_FLOODFILL: int +RetrievalModes = int # One of [RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE, RETR_FLOODFILL] + +CHAIN_APPROX_NONE: int +CHAIN_APPROX_SIMPLE: int +CHAIN_APPROX_TC89_L1: int +CHAIN_APPROX_TC89_KCOS: int +# One of [CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS] +ContourApproximationModes = int + +CONTOURS_MATCH_I1: int +CONTOURS_MATCH_I2: int +CONTOURS_MATCH_I3: int +ShapeMatchModes = int # One of [CONTOURS_MATCH_I1, CONTOURS_MATCH_I2, CONTOURS_MATCH_I3] + +HOUGH_STANDARD: int +HOUGH_PROBABILISTIC: int +HOUGH_MULTI_SCALE: int +HOUGH_GRADIENT: int +HOUGH_GRADIENT_ALT: int +HoughModes = int # One of [HOUGH_STANDARD, HOUGH_PROBABILISTIC, HOUGH_MULTI_SCALE, HOUGH_GRADIENT, HOUGH_GRADIENT_ALT] + +LSD_REFINE_NONE: int +LSD_REFINE_STD: int +LSD_REFINE_ADV: int +LineSegmentDetectorModes = int # One of [LSD_REFINE_NONE, LSD_REFINE_STD, LSD_REFINE_ADV] + +HISTCMP_CORREL: int +HISTCMP_CHISQR: int +HISTCMP_INTERSECT: int +HISTCMP_BHATTACHARYYA: int +HISTCMP_HELLINGER: int +HISTCMP_CHISQR_ALT: int +HISTCMP_KL_DIV: int +# One of [HISTCMP_CORREL, HISTCMP_CHISQR, HISTCMP_INTERSECT, +# HISTCMP_BHATTACHARYYA, HISTCMP_HELLINGER, HISTCMP_CHISQR_ALT, +# HISTCMP_KL_DIV] +HistCompMethods = int + +COLOR_BGR2BGRA: int +COLOR_RGB2RGBA: int +COLOR_BGRA2BGR: int +COLOR_RGBA2RGB: int +COLOR_BGR2RGBA: int +COLOR_RGB2BGRA: int +COLOR_RGBA2BGR: int +COLOR_BGRA2RGB: int +COLOR_BGR2RGB: int +COLOR_RGB2BGR: int +COLOR_BGRA2RGBA: int +COLOR_RGBA2BGRA: int +COLOR_BGR2GRAY: int +COLOR_RGB2GRAY: int +COLOR_GRAY2BGR: int +COLOR_GRAY2RGB: int +COLOR_GRAY2BGRA: int +COLOR_GRAY2RGBA: int +COLOR_BGRA2GRAY: int +COLOR_RGBA2GRAY: int +COLOR_BGR2BGR565: int +COLOR_RGB2BGR565: int +COLOR_BGR5652BGR: int +COLOR_BGR5652RGB: int +COLOR_BGRA2BGR565: int +COLOR_RGBA2BGR565: int +COLOR_BGR5652BGRA: int +COLOR_BGR5652RGBA: int +COLOR_GRAY2BGR565: int +COLOR_BGR5652GRAY: int +COLOR_BGR2BGR555: int +COLOR_RGB2BGR555: int +COLOR_BGR5552BGR: int +COLOR_BGR5552RGB: int +COLOR_BGRA2BGR555: int +COLOR_RGBA2BGR555: int +COLOR_BGR5552BGRA: int +COLOR_BGR5552RGBA: int +COLOR_GRAY2BGR555: int +COLOR_BGR5552GRAY: int +COLOR_BGR2XYZ: int +COLOR_RGB2XYZ: int +COLOR_XYZ2BGR: int +COLOR_XYZ2RGB: int +COLOR_BGR2YCrCb: int +COLOR_RGB2YCrCb: int +COLOR_YCrCb2BGR: int +COLOR_YCrCb2RGB: int +COLOR_BGR2HSV: int +COLOR_RGB2HSV: int +COLOR_BGR2Lab: int +COLOR_RGB2Lab: int +COLOR_BGR2Luv: int +COLOR_RGB2Luv: int +COLOR_BGR2HLS: int +COLOR_RGB2HLS: int +COLOR_HSV2BGR: int +COLOR_HSV2RGB: int +COLOR_Lab2BGR: int +COLOR_Lab2RGB: int +COLOR_Luv2BGR: int +COLOR_Luv2RGB: int +COLOR_HLS2BGR: int +COLOR_HLS2RGB: int +COLOR_BGR2HSV_FULL: int +COLOR_RGB2HSV_FULL: int +COLOR_BGR2HLS_FULL: int +COLOR_RGB2HLS_FULL: int +COLOR_HSV2BGR_FULL: int +COLOR_HSV2RGB_FULL: int +COLOR_HLS2BGR_FULL: int +COLOR_HLS2RGB_FULL: int +COLOR_LBGR2Lab: int +COLOR_LRGB2Lab: int +COLOR_LBGR2Luv: int +COLOR_LRGB2Luv: int +COLOR_Lab2LBGR: int +COLOR_Lab2LRGB: int +COLOR_Luv2LBGR: int 
+COLOR_Luv2LRGB: int +COLOR_BGR2YUV: int +COLOR_RGB2YUV: int +COLOR_YUV2BGR: int +COLOR_YUV2RGB: int +COLOR_YUV2RGB_NV12: int +COLOR_YUV2BGR_NV12: int +COLOR_YUV2RGB_NV21: int +COLOR_YUV2BGR_NV21: int +COLOR_YUV420sp2RGB: int +COLOR_YUV420sp2BGR: int +COLOR_YUV2RGBA_NV12: int +COLOR_YUV2BGRA_NV12: int +COLOR_YUV2RGBA_NV21: int +COLOR_YUV2BGRA_NV21: int +COLOR_YUV420sp2RGBA: int +COLOR_YUV420sp2BGRA: int +COLOR_YUV2RGB_YV12: int +COLOR_YUV2BGR_YV12: int +COLOR_YUV2RGB_IYUV: int +COLOR_YUV2BGR_IYUV: int +COLOR_YUV2RGB_I420: int +COLOR_YUV2BGR_I420: int +COLOR_YUV420p2RGB: int +COLOR_YUV420p2BGR: int +COLOR_YUV2RGBA_YV12: int +COLOR_YUV2BGRA_YV12: int +COLOR_YUV2RGBA_IYUV: int +COLOR_YUV2BGRA_IYUV: int +COLOR_YUV2RGBA_I420: int +COLOR_YUV2BGRA_I420: int +COLOR_YUV420p2RGBA: int +COLOR_YUV420p2BGRA: int +COLOR_YUV2GRAY_420: int +COLOR_YUV2GRAY_NV21: int +COLOR_YUV2GRAY_NV12: int +COLOR_YUV2GRAY_YV12: int +COLOR_YUV2GRAY_IYUV: int +COLOR_YUV2GRAY_I420: int +COLOR_YUV420sp2GRAY: int +COLOR_YUV420p2GRAY: int +COLOR_YUV2RGB_UYVY: int +COLOR_YUV2BGR_UYVY: int +COLOR_YUV2RGB_Y422: int +COLOR_YUV2BGR_Y422: int +COLOR_YUV2RGB_UYNV: int +COLOR_YUV2BGR_UYNV: int +COLOR_YUV2RGBA_UYVY: int +COLOR_YUV2BGRA_UYVY: int +COLOR_YUV2RGBA_Y422: int +COLOR_YUV2BGRA_Y422: int +COLOR_YUV2RGBA_UYNV: int +COLOR_YUV2BGRA_UYNV: int +COLOR_YUV2RGB_YUY2: int +COLOR_YUV2BGR_YUY2: int +COLOR_YUV2RGB_YVYU: int +COLOR_YUV2BGR_YVYU: int +COLOR_YUV2RGB_YUYV: int +COLOR_YUV2BGR_YUYV: int +COLOR_YUV2RGB_YUNV: int +COLOR_YUV2BGR_YUNV: int +COLOR_YUV2RGBA_YUY2: int +COLOR_YUV2BGRA_YUY2: int +COLOR_YUV2RGBA_YVYU: int +COLOR_YUV2BGRA_YVYU: int +COLOR_YUV2RGBA_YUYV: int +COLOR_YUV2BGRA_YUYV: int +COLOR_YUV2RGBA_YUNV: int +COLOR_YUV2BGRA_YUNV: int +COLOR_YUV2GRAY_UYVY: int +COLOR_YUV2GRAY_YUY2: int +COLOR_YUV2GRAY_Y422: int +COLOR_YUV2GRAY_UYNV: int +COLOR_YUV2GRAY_YVYU: int +COLOR_YUV2GRAY_YUYV: int +COLOR_YUV2GRAY_YUNV: int +COLOR_RGBA2mRGBA: int +COLOR_mRGBA2RGBA: int +COLOR_RGB2YUV_I420: int +COLOR_BGR2YUV_I420: int +COLOR_RGB2YUV_IYUV: int +COLOR_BGR2YUV_IYUV: int +COLOR_RGBA2YUV_I420: int +COLOR_BGRA2YUV_I420: int +COLOR_RGBA2YUV_IYUV: int +COLOR_BGRA2YUV_IYUV: int +COLOR_RGB2YUV_YV12: int +COLOR_BGR2YUV_YV12: int +COLOR_RGBA2YUV_YV12: int +COLOR_BGRA2YUV_YV12: int +COLOR_BayerBG2BGR: int +COLOR_BayerGB2BGR: int +COLOR_BayerRG2BGR: int +COLOR_BayerGR2BGR: int +COLOR_BayerRGGB2BGR: int +COLOR_BayerGRBG2BGR: int +COLOR_BayerBGGR2BGR: int +COLOR_BayerGBRG2BGR: int +COLOR_BayerRGGB2RGB: int +COLOR_BayerGRBG2RGB: int +COLOR_BayerBGGR2RGB: int +COLOR_BayerGBRG2RGB: int +COLOR_BayerBG2RGB: int +COLOR_BayerGB2RGB: int +COLOR_BayerRG2RGB: int +COLOR_BayerGR2RGB: int +COLOR_BayerBG2GRAY: int +COLOR_BayerGB2GRAY: int +COLOR_BayerRG2GRAY: int +COLOR_BayerGR2GRAY: int +COLOR_BayerRGGB2GRAY: int +COLOR_BayerGRBG2GRAY: int +COLOR_BayerBGGR2GRAY: int +COLOR_BayerGBRG2GRAY: int +COLOR_BayerBG2BGR_VNG: int +COLOR_BayerGB2BGR_VNG: int +COLOR_BayerRG2BGR_VNG: int +COLOR_BayerGR2BGR_VNG: int +COLOR_BayerRGGB2BGR_VNG: int +COLOR_BayerGRBG2BGR_VNG: int +COLOR_BayerBGGR2BGR_VNG: int +COLOR_BayerGBRG2BGR_VNG: int +COLOR_BayerRGGB2RGB_VNG: int +COLOR_BayerGRBG2RGB_VNG: int +COLOR_BayerBGGR2RGB_VNG: int +COLOR_BayerGBRG2RGB_VNG: int +COLOR_BayerBG2RGB_VNG: int +COLOR_BayerGB2RGB_VNG: int +COLOR_BayerRG2RGB_VNG: int +COLOR_BayerGR2RGB_VNG: int +COLOR_BayerBG2BGR_EA: int +COLOR_BayerGB2BGR_EA: int +COLOR_BayerRG2BGR_EA: int +COLOR_BayerGR2BGR_EA: int +COLOR_BayerRGGB2BGR_EA: int +COLOR_BayerGRBG2BGR_EA: int +COLOR_BayerBGGR2BGR_EA: int 
+COLOR_BayerGBRG2BGR_EA: int +COLOR_BayerRGGB2RGB_EA: int +COLOR_BayerGRBG2RGB_EA: int +COLOR_BayerBGGR2RGB_EA: int +COLOR_BayerGBRG2RGB_EA: int +COLOR_BayerBG2RGB_EA: int +COLOR_BayerGB2RGB_EA: int +COLOR_BayerRG2RGB_EA: int +COLOR_BayerGR2RGB_EA: int +COLOR_BayerBG2BGRA: int +COLOR_BayerGB2BGRA: int +COLOR_BayerRG2BGRA: int +COLOR_BayerGR2BGRA: int +COLOR_BayerRGGB2BGRA: int +COLOR_BayerGRBG2BGRA: int +COLOR_BayerBGGR2BGRA: int +COLOR_BayerGBRG2BGRA: int +COLOR_BayerRGGB2RGBA: int +COLOR_BayerGRBG2RGBA: int +COLOR_BayerBGGR2RGBA: int +COLOR_BayerGBRG2RGBA: int +COLOR_BayerBG2RGBA: int +COLOR_BayerGB2RGBA: int +COLOR_BayerRG2RGBA: int +COLOR_BayerGR2RGBA: int +COLOR_COLORCVT_MAX: int +ColorConversionCodes = int # One of [COLOR_BGR2BGRA, COLOR_RGB2RGBA, COLOR_BGRA2BGR, COLOR_RGBA2RGB, COLOR_BGR2RGBA, +# COLOR_RGB2BGRA, COLOR_RGBA2BGR, COLOR_BGRA2RGB, COLOR_BGR2RGB, COLOR_RGB2BGR, COLOR_BGRA2RGBA, COLOR_RGBA2BGRA, +# COLOR_BGR2GRAY, COLOR_RGB2GRAY, COLOR_GRAY2BGR, COLOR_GRAY2RGB, COLOR_GRAY2BGRA, COLOR_GRAY2RGBA, COLOR_BGRA2GRAY, +# COLOR_RGBA2GRAY, COLOR_BGR2BGR565, COLOR_RGB2BGR565, COLOR_BGR5652BGR, COLOR_BGR5652RGB, COLOR_BGRA2BGR565, +# COLOR_RGBA2BGR565, COLOR_BGR5652BGRA, COLOR_BGR5652RGBA, COLOR_GRAY2BGR565, COLOR_BGR5652GRAY, COLOR_BGR2BGR555, +# COLOR_RGB2BGR555, COLOR_BGR5552BGR, COLOR_BGR5552RGB, COLOR_BGRA2BGR555, COLOR_RGBA2BGR555, COLOR_BGR5552BGRA, +# COLOR_BGR5552RGBA, COLOR_GRAY2BGR555, COLOR_BGR5552GRAY, COLOR_BGR2XYZ, COLOR_RGB2XYZ, COLOR_XYZ2BGR, COLOR_XYZ2RGB, +# COLOR_BGR2YCrCb, COLOR_RGB2YCrCb, COLOR_YCrCb2BGR, COLOR_YCrCb2RGB, COLOR_BGR2HSV, COLOR_RGB2HSV, COLOR_BGR2Lab, +# COLOR_RGB2Lab, COLOR_BGR2Luv, COLOR_RGB2Luv, COLOR_BGR2HLS, COLOR_RGB2HLS, COLOR_HSV2BGR, COLOR_HSV2RGB, +# COLOR_Lab2BGR, COLOR_Lab2RGB, COLOR_Luv2BGR, COLOR_Luv2RGB, COLOR_HLS2BGR, COLOR_HLS2RGB, COLOR_BGR2HSV_FULL, +# COLOR_RGB2HSV_FULL, COLOR_BGR2HLS_FULL, COLOR_RGB2HLS_FULL, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB_FULL, +# COLOR_HLS2BGR_FULL, COLOR_HLS2RGB_FULL, COLOR_LBGR2Lab, COLOR_LRGB2Lab, COLOR_LBGR2Luv, COLOR_LRGB2Luv, +# COLOR_Lab2LBGR, COLOR_Lab2LRGB, COLOR_Luv2LBGR, COLOR_Luv2LRGB, COLOR_BGR2YUV, COLOR_RGB2YUV, COLOR_YUV2BGR, +# COLOR_YUV2RGB, COLOR_YUV2RGB_NV12, COLOR_YUV2BGR_NV12, COLOR_YUV2RGB_NV21, COLOR_YUV2BGR_NV21, COLOR_YUV420sp2RGB, +# COLOR_YUV420sp2BGR, COLOR_YUV2RGBA_NV12, COLOR_YUV2BGRA_NV12, COLOR_YUV2RGBA_NV21, COLOR_YUV2BGRA_NV21, +# COLOR_YUV420sp2RGBA, COLOR_YUV420sp2BGRA, COLOR_YUV2RGB_YV12, COLOR_YUV2BGR_YV12, COLOR_YUV2RGB_IYUV, +# COLOR_YUV2BGR_IYUV, COLOR_YUV2RGB_I420, COLOR_YUV2BGR_I420, COLOR_YUV420p2RGB, COLOR_YUV420p2BGR, COLOR_YUV2RGBA_YV12, +# COLOR_YUV2BGRA_YV12, COLOR_YUV2RGBA_IYUV, COLOR_YUV2BGRA_IYUV, COLOR_YUV2RGBA_I420, COLOR_YUV2BGRA_I420, +# COLOR_YUV420p2RGBA, COLOR_YUV420p2BGRA, COLOR_YUV2GRAY_420, COLOR_YUV2GRAY_NV21, COLOR_YUV2GRAY_NV12, +# COLOR_YUV2GRAY_YV12, COLOR_YUV2GRAY_IYUV, COLOR_YUV2GRAY_I420, COLOR_YUV420sp2GRAY, COLOR_YUV420p2GRAY, +# COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGB_Y422, COLOR_YUV2BGR_Y422, COLOR_YUV2RGB_UYNV, +# COLOR_YUV2BGR_UYNV, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGBA_Y422, COLOR_YUV2BGRA_Y422, +# COLOR_YUV2RGBA_UYNV, COLOR_YUV2BGRA_UYNV, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, +# COLOR_YUV2BGR_YVYU, COLOR_YUV2RGB_YUYV, COLOR_YUV2BGR_YUYV, COLOR_YUV2RGB_YUNV, COLOR_YUV2BGR_YUNV, +# COLOR_YUV2RGBA_YUY2, COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU, COLOR_YUV2RGBA_YUYV, +# COLOR_YUV2BGRA_YUYV, COLOR_YUV2RGBA_YUNV, COLOR_YUV2BGRA_YUNV, COLOR_YUV2GRAY_UYVY, 
COLOR_YUV2GRAY_YUY2, +# COLOR_YUV2GRAY_Y422, COLOR_YUV2GRAY_UYNV, COLOR_YUV2GRAY_YVYU, COLOR_YUV2GRAY_YUYV, COLOR_YUV2GRAY_YUNV, +# COLOR_RGBA2mRGBA, COLOR_mRGBA2RGBA, COLOR_RGB2YUV_I420, COLOR_BGR2YUV_I420, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, +# COLOR_RGBA2YUV_I420, COLOR_BGRA2YUV_I420, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, COLOR_RGB2YUV_YV12, +# COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12, COLOR_BayerBG2BGR, COLOR_BayerGB2BGR, COLOR_BayerRG2BGR, +# COLOR_BayerGR2BGR, COLOR_BayerRGGB2BGR, COLOR_BayerGRBG2BGR, COLOR_BayerBGGR2BGR, COLOR_BayerGBRG2BGR, +# COLOR_BayerRGGB2RGB, COLOR_BayerGRBG2RGB, COLOR_BayerBGGR2RGB, COLOR_BayerGBRG2RGB, COLOR_BayerBG2RGB, +# COLOR_BayerGB2RGB, COLOR_BayerRG2RGB, COLOR_BayerGR2RGB, COLOR_BayerBG2GRAY, COLOR_BayerGB2GRAY, COLOR_BayerRG2GRAY, +# COLOR_BayerGR2GRAY, COLOR_BayerRGGB2GRAY, COLOR_BayerGRBG2GRAY, COLOR_BayerBGGR2GRAY, COLOR_BayerGBRG2GRAY, +# COLOR_BayerBG2BGR_VNG, COLOR_BayerGB2BGR_VNG, COLOR_BayerRG2BGR_VNG, COLOR_BayerGR2BGR_VNG, COLOR_BayerRGGB2BGR_VNG, +# COLOR_BayerGRBG2BGR_VNG, COLOR_BayerBGGR2BGR_VNG, COLOR_BayerGBRG2BGR_VNG, COLOR_BayerRGGB2RGB_VNG, +# COLOR_BayerGRBG2RGB_VNG, COLOR_BayerBGGR2RGB_VNG, COLOR_BayerGBRG2RGB_VNG, COLOR_BayerBG2RGB_VNG, +# COLOR_BayerGB2RGB_VNG, COLOR_BayerRG2RGB_VNG, COLOR_BayerGR2RGB_VNG, COLOR_BayerBG2BGR_EA, COLOR_BayerGB2BGR_EA, +# COLOR_BayerRG2BGR_EA, COLOR_BayerGR2BGR_EA, COLOR_BayerRGGB2BGR_EA, COLOR_BayerGRBG2BGR_EA, COLOR_BayerBGGR2BGR_EA, +# COLOR_BayerGBRG2BGR_EA, COLOR_BayerRGGB2RGB_EA, COLOR_BayerGRBG2RGB_EA, COLOR_BayerBGGR2RGB_EA, +# COLOR_BayerGBRG2RGB_EA, COLOR_BayerBG2RGB_EA, COLOR_BayerGB2RGB_EA, COLOR_BayerRG2RGB_EA, COLOR_BayerGR2RGB_EA, +# COLOR_BayerBG2BGRA, COLOR_BayerGB2BGRA, COLOR_BayerRG2BGRA, COLOR_BayerGR2BGRA, COLOR_BayerRGGB2BGRA, +# COLOR_BayerGRBG2BGRA, COLOR_BayerBGGR2BGRA, COLOR_BayerGBRG2BGRA, COLOR_BayerRGGB2RGBA, COLOR_BayerGRBG2RGBA, +# COLOR_BayerBGGR2RGBA, COLOR_BayerGBRG2RGBA, COLOR_BayerBG2RGBA, COLOR_BayerGB2RGBA, COLOR_BayerRG2RGBA, +# COLOR_BayerGR2RGBA, COLOR_COLORCVT_MAX] + +INTERSECT_NONE: int +INTERSECT_PARTIAL: int +INTERSECT_FULL: int +RectanglesIntersectTypes = int # One of [INTERSECT_NONE, INTERSECT_PARTIAL, INTERSECT_FULL] + +FILLED: int +LINE_4: int +LINE_8: int +LINE_AA: int +LineTypes = int # One of [FILLED, LINE_4, LINE_8, LINE_AA] + +FONT_HERSHEY_SIMPLEX: int +FONT_HERSHEY_PLAIN: int +FONT_HERSHEY_DUPLEX: int +FONT_HERSHEY_COMPLEX: int +FONT_HERSHEY_TRIPLEX: int +FONT_HERSHEY_COMPLEX_SMALL: int +FONT_HERSHEY_SCRIPT_SIMPLEX: int +FONT_HERSHEY_SCRIPT_COMPLEX: int +FONT_ITALIC: int +# One of [FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, +# FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, +# FONT_HERSHEY_SCRIPT_SIMPLEX, FONT_HERSHEY_SCRIPT_COMPLEX, FONT_ITALIC] +HersheyFonts = int + +MARKER_CROSS: int +MARKER_TILTED_CROSS: int +MARKER_STAR: int +MARKER_DIAMOND: int +MARKER_SQUARE: int +MARKER_TRIANGLE_UP: int +MARKER_TRIANGLE_DOWN: int +# One of [MARKER_CROSS, MARKER_TILTED_CROSS, MARKER_STAR, MARKER_DIAMOND, +# MARKER_SQUARE, MARKER_TRIANGLE_UP, MARKER_TRIANGLE_DOWN] +MarkerTypes = int + +TM_SQDIFF: int +TM_SQDIFF_NORMED: int +TM_CCORR: int +TM_CCORR_NORMED: int +TM_CCOEFF: int +TM_CCOEFF_NORMED: int +TemplateMatchModes = int # One of [TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED] + +COLORMAP_AUTUMN: int +COLORMAP_BONE: int +COLORMAP_JET: int +COLORMAP_WINTER: int +COLORMAP_RAINBOW: int +COLORMAP_OCEAN: int +COLORMAP_SUMMER: int +COLORMAP_SPRING: 
int +COLORMAP_COOL: int +COLORMAP_HSV: int +COLORMAP_PINK: int +COLORMAP_HOT: int +COLORMAP_PARULA: int +COLORMAP_MAGMA: int +COLORMAP_INFERNO: int +COLORMAP_PLASMA: int +COLORMAP_VIRIDIS: int +COLORMAP_CIVIDIS: int +COLORMAP_TWILIGHT: int +COLORMAP_TWILIGHT_SHIFTED: int +COLORMAP_TURBO: int +COLORMAP_DEEPGREEN: int +ColormapTypes = int # One of [COLORMAP_AUTUMN, COLORMAP_BONE, COLORMAP_JET, COLORMAP_WINTER, COLORMAP_RAINBOW, +# COLORMAP_OCEAN, COLORMAP_SUMMER, COLORMAP_SPRING, COLORMAP_COOL, COLORMAP_HSV, COLORMAP_PINK, COLORMAP_HOT, +# COLORMAP_PARULA, COLORMAP_MAGMA, COLORMAP_INFERNO, COLORMAP_PLASMA, COLORMAP_VIRIDIS, COLORMAP_CIVIDIS, +# COLORMAP_TWILIGHT, COLORMAP_TWILIGHT_SHIFTED, COLORMAP_TURBO, COLORMAP_DEEPGREEN] + +INPAINT_NS: int +INPAINT_TELEA: int +LDR_SIZE: int +NORMAL_CLONE: int +MIXED_CLONE: int +MONOCHROME_TRANSFER: int +RECURS_FILTER: int +NORMCONV_FILTER: int +CAP_PROP_DC1394_OFF: int +CAP_PROP_DC1394_MODE_MANUAL: int +CAP_PROP_DC1394_MODE_AUTO: int +CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO: int +CAP_PROP_DC1394_MAX: int +CAP_OPENNI_DEPTH_GENERATOR: int +CAP_OPENNI_IMAGE_GENERATOR: int +CAP_OPENNI_IR_GENERATOR: int +CAP_OPENNI_GENERATORS_MASK: int +CAP_PROP_OPENNI_OUTPUT_MODE: int +CAP_PROP_OPENNI_FRAME_MAX_DEPTH: int +CAP_PROP_OPENNI_BASELINE: int +CAP_PROP_OPENNI_FOCAL_LENGTH: int +CAP_PROP_OPENNI_REGISTRATION: int +CAP_PROP_OPENNI_REGISTRATION_ON: int +CAP_PROP_OPENNI_APPROX_FRAME_SYNC: int +CAP_PROP_OPENNI_MAX_BUFFER_SIZE: int +CAP_PROP_OPENNI_CIRCLE_BUFFER: int +CAP_PROP_OPENNI_MAX_TIME_DURATION: int +CAP_PROP_OPENNI_GENERATOR_PRESENT: int +CAP_PROP_OPENNI2_SYNC: int +CAP_PROP_OPENNI2_MIRROR: int +CAP_OPENNI_IMAGE_GENERATOR_PRESENT: int +CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE: int +CAP_OPENNI_DEPTH_GENERATOR_PRESENT: int +CAP_OPENNI_DEPTH_GENERATOR_BASELINE: int +CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH: int +CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION: int +CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON: int +CAP_OPENNI_IR_GENERATOR_PRESENT: int +CAP_OPENNI_DEPTH_MAP: int +CAP_OPENNI_POINT_CLOUD_MAP: int +CAP_OPENNI_DISPARITY_MAP: int +CAP_OPENNI_DISPARITY_MAP_32F: int +CAP_OPENNI_VALID_DEPTH_MASK: int +CAP_OPENNI_BGR_IMAGE: int +CAP_OPENNI_GRAY_IMAGE: int +CAP_OPENNI_IR_IMAGE: int +CAP_OPENNI_VGA_30HZ: int +CAP_OPENNI_SXGA_15HZ: int +CAP_OPENNI_SXGA_30HZ: int +CAP_OPENNI_QVGA_30HZ: int +CAP_OPENNI_QVGA_60HZ: int +CAP_PROP_GSTREAMER_QUEUE_LENGTH: int +CAP_PROP_PVAPI_MULTICASTIP: int +CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE: int +CAP_PROP_PVAPI_DECIMATIONHORIZONTAL: int +CAP_PROP_PVAPI_DECIMATIONVERTICAL: int +CAP_PROP_PVAPI_BINNINGX: int +CAP_PROP_PVAPI_BINNINGY: int +CAP_PROP_PVAPI_PIXELFORMAT: int +CAP_PVAPI_FSTRIGMODE_FREERUN: int +CAP_PVAPI_FSTRIGMODE_SYNCIN1: int +CAP_PVAPI_FSTRIGMODE_SYNCIN2: int +CAP_PVAPI_FSTRIGMODE_FIXEDRATE: int +CAP_PVAPI_FSTRIGMODE_SOFTWARE: int +CAP_PVAPI_DECIMATION_OFF: int +CAP_PVAPI_DECIMATION_2OUTOF4: int +CAP_PVAPI_DECIMATION_2OUTOF8: int +CAP_PVAPI_DECIMATION_2OUTOF16: int +CAP_PVAPI_PIXELFORMAT_MONO8: int +CAP_PVAPI_PIXELFORMAT_MONO16: int +CAP_PVAPI_PIXELFORMAT_BAYER8: int +CAP_PVAPI_PIXELFORMAT_BAYER16: int +CAP_PVAPI_PIXELFORMAT_RGB24: int +CAP_PVAPI_PIXELFORMAT_BGR24: int +CAP_PVAPI_PIXELFORMAT_RGBA32: int +CAP_PVAPI_PIXELFORMAT_BGRA32: int +CAP_PROP_XI_DOWNSAMPLING: int +CAP_PROP_XI_DATA_FORMAT: int +CAP_PROP_XI_OFFSET_X: int +CAP_PROP_XI_OFFSET_Y: int +CAP_PROP_XI_TRG_SOURCE: int +CAP_PROP_XI_TRG_SOFTWARE: int +CAP_PROP_XI_GPI_SELECTOR: int +CAP_PROP_XI_GPI_MODE: int +CAP_PROP_XI_GPI_LEVEL: int +CAP_PROP_XI_GPO_SELECTOR: int 
+CAP_PROP_XI_GPO_MODE: int +CAP_PROP_XI_LED_SELECTOR: int +CAP_PROP_XI_LED_MODE: int +CAP_PROP_XI_MANUAL_WB: int +CAP_PROP_XI_AUTO_WB: int +CAP_PROP_XI_AEAG: int +CAP_PROP_XI_EXP_PRIORITY: int +CAP_PROP_XI_AE_MAX_LIMIT: int +CAP_PROP_XI_AG_MAX_LIMIT: int +CAP_PROP_XI_AEAG_LEVEL: int +CAP_PROP_XI_TIMEOUT: int +CAP_PROP_XI_EXPOSURE: int +CAP_PROP_XI_EXPOSURE_BURST_COUNT: int +CAP_PROP_XI_GAIN_SELECTOR: int +CAP_PROP_XI_GAIN: int +CAP_PROP_XI_DOWNSAMPLING_TYPE: int +CAP_PROP_XI_BINNING_SELECTOR: int +CAP_PROP_XI_BINNING_VERTICAL: int +CAP_PROP_XI_BINNING_HORIZONTAL: int +CAP_PROP_XI_BINNING_PATTERN: int +CAP_PROP_XI_DECIMATION_SELECTOR: int +CAP_PROP_XI_DECIMATION_VERTICAL: int +CAP_PROP_XI_DECIMATION_HORIZONTAL: int +CAP_PROP_XI_DECIMATION_PATTERN: int +CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR: int +CAP_PROP_XI_TEST_PATTERN: int +CAP_PROP_XI_IMAGE_DATA_FORMAT: int +CAP_PROP_XI_SHUTTER_TYPE: int +CAP_PROP_XI_SENSOR_TAPS: int +CAP_PROP_XI_AEAG_ROI_OFFSET_X: int +CAP_PROP_XI_AEAG_ROI_OFFSET_Y: int +CAP_PROP_XI_AEAG_ROI_WIDTH: int +CAP_PROP_XI_AEAG_ROI_HEIGHT: int +CAP_PROP_XI_BPC: int +CAP_PROP_XI_WB_KR: int +CAP_PROP_XI_WB_KG: int +CAP_PROP_XI_WB_KB: int +CAP_PROP_XI_WIDTH: int +CAP_PROP_XI_HEIGHT: int +CAP_PROP_XI_REGION_SELECTOR: int +CAP_PROP_XI_REGION_MODE: int +CAP_PROP_XI_LIMIT_BANDWIDTH: int +CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH: int +CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH: int +CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH: int +CAP_PROP_XI_OUTPUT_DATA_PACKING: int +CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE: int +CAP_PROP_XI_IS_COOLED: int +CAP_PROP_XI_COOLING: int +CAP_PROP_XI_TARGET_TEMP: int +CAP_PROP_XI_CHIP_TEMP: int +CAP_PROP_XI_HOUS_TEMP: int +CAP_PROP_XI_HOUS_BACK_SIDE_TEMP: int +CAP_PROP_XI_SENSOR_BOARD_TEMP: int +CAP_PROP_XI_CMS: int +CAP_PROP_XI_APPLY_CMS: int +CAP_PROP_XI_IMAGE_IS_COLOR: int +CAP_PROP_XI_COLOR_FILTER_ARRAY: int +CAP_PROP_XI_GAMMAY: int +CAP_PROP_XI_GAMMAC: int +CAP_PROP_XI_SHARPNESS: int +CAP_PROP_XI_CC_MATRIX_00: int +CAP_PROP_XI_CC_MATRIX_01: int +CAP_PROP_XI_CC_MATRIX_02: int +CAP_PROP_XI_CC_MATRIX_03: int +CAP_PROP_XI_CC_MATRIX_10: int +CAP_PROP_XI_CC_MATRIX_11: int +CAP_PROP_XI_CC_MATRIX_12: int +CAP_PROP_XI_CC_MATRIX_13: int +CAP_PROP_XI_CC_MATRIX_20: int +CAP_PROP_XI_CC_MATRIX_21: int +CAP_PROP_XI_CC_MATRIX_22: int +CAP_PROP_XI_CC_MATRIX_23: int +CAP_PROP_XI_CC_MATRIX_30: int +CAP_PROP_XI_CC_MATRIX_31: int +CAP_PROP_XI_CC_MATRIX_32: int +CAP_PROP_XI_CC_MATRIX_33: int +CAP_PROP_XI_DEFAULT_CC_MATRIX: int +CAP_PROP_XI_TRG_SELECTOR: int +CAP_PROP_XI_ACQ_FRAME_BURST_COUNT: int +CAP_PROP_XI_DEBOUNCE_EN: int +CAP_PROP_XI_DEBOUNCE_T0: int +CAP_PROP_XI_DEBOUNCE_T1: int +CAP_PROP_XI_DEBOUNCE_POL: int +CAP_PROP_XI_LENS_MODE: int +CAP_PROP_XI_LENS_APERTURE_VALUE: int +CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE: int +CAP_PROP_XI_LENS_FOCUS_MOVE: int +CAP_PROP_XI_LENS_FOCUS_DISTANCE: int +CAP_PROP_XI_LENS_FOCAL_LENGTH: int +CAP_PROP_XI_LENS_FEATURE_SELECTOR: int +CAP_PROP_XI_LENS_FEATURE: int +CAP_PROP_XI_DEVICE_MODEL_ID: int +CAP_PROP_XI_DEVICE_SN: int +CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA: int +CAP_PROP_XI_IMAGE_PAYLOAD_SIZE: int +CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT: int +CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ: int +CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX: int +CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT: int +CAP_PROP_XI_FRAMERATE: int +CAP_PROP_XI_COUNTER_SELECTOR: int +CAP_PROP_XI_COUNTER_VALUE: int +CAP_PROP_XI_ACQ_TIMING_MODE: int +CAP_PROP_XI_AVAILABLE_BANDWIDTH: int +CAP_PROP_XI_BUFFER_POLICY: int +CAP_PROP_XI_LUT_EN: int +CAP_PROP_XI_LUT_INDEX: int +CAP_PROP_XI_LUT_VALUE: int 
+CAP_PROP_XI_TRG_DELAY: int +CAP_PROP_XI_TS_RST_MODE: int +CAP_PROP_XI_TS_RST_SOURCE: int +CAP_PROP_XI_IS_DEVICE_EXIST: int +CAP_PROP_XI_ACQ_BUFFER_SIZE: int +CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT: int +CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE: int +CAP_PROP_XI_BUFFERS_QUEUE_SIZE: int +CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT: int +CAP_PROP_XI_RECENT_FRAME: int +CAP_PROP_XI_DEVICE_RESET: int +CAP_PROP_XI_COLUMN_FPN_CORRECTION: int +CAP_PROP_XI_ROW_FPN_CORRECTION: int +CAP_PROP_XI_SENSOR_MODE: int +CAP_PROP_XI_HDR: int +CAP_PROP_XI_HDR_KNEEPOINT_COUNT: int +CAP_PROP_XI_HDR_T1: int +CAP_PROP_XI_HDR_T2: int +CAP_PROP_XI_KNEEPOINT1: int +CAP_PROP_XI_KNEEPOINT2: int +CAP_PROP_XI_IMAGE_BLACK_LEVEL: int +CAP_PROP_XI_HW_REVISION: int +CAP_PROP_XI_DEBUG_LEVEL: int +CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION: int +CAP_PROP_XI_FFS_FILE_ID: int +CAP_PROP_XI_FFS_FILE_SIZE: int +CAP_PROP_XI_FREE_FFS_SIZE: int +CAP_PROP_XI_USED_FFS_SIZE: int +CAP_PROP_XI_FFS_ACCESS_KEY: int +CAP_PROP_XI_SENSOR_FEATURE_SELECTOR: int +CAP_PROP_XI_SENSOR_FEATURE_VALUE: int +CAP_PROP_ARAVIS_AUTOTRIGGER: int +CAP_PROP_IOS_DEVICE_FOCUS: int +CAP_PROP_IOS_DEVICE_EXPOSURE: int +CAP_PROP_IOS_DEVICE_FLASH: int +CAP_PROP_IOS_DEVICE_WHITEBALANCE: int +CAP_PROP_IOS_DEVICE_TORCH: int +CAP_PROP_GIGA_FRAME_OFFSET_X: int +CAP_PROP_GIGA_FRAME_OFFSET_Y: int +CAP_PROP_GIGA_FRAME_WIDTH_MAX: int +CAP_PROP_GIGA_FRAME_HEIGH_MAX: int +CAP_PROP_GIGA_FRAME_SENS_WIDTH: int +CAP_PROP_GIGA_FRAME_SENS_HEIGH: int +CAP_PROP_INTELPERC_PROFILE_COUNT: int +CAP_PROP_INTELPERC_PROFILE_IDX: int +CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE: int +CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE: int +CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD: int +CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ: int +CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT: int +CAP_INTELPERC_DEPTH_GENERATOR: int +CAP_INTELPERC_IMAGE_GENERATOR: int +CAP_INTELPERC_IR_GENERATOR: int +CAP_INTELPERC_GENERATORS_MASK: int +CAP_INTELPERC_DEPTH_MAP: int +CAP_INTELPERC_UVDEPTH_MAP: int +CAP_INTELPERC_IR_MAP: int +CAP_INTELPERC_IMAGE: int +CAP_PROP_GPHOTO2_PREVIEW: int +CAP_PROP_GPHOTO2_WIDGET_ENUMERATE: int +CAP_PROP_GPHOTO2_RELOAD_CONFIG: int +CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE: int +CAP_PROP_GPHOTO2_COLLECT_MSGS: int +CAP_PROP_GPHOTO2_FLUSH_MSGS: int +CAP_PROP_SPEED: int +CAP_PROP_APERTURE: int +CAP_PROP_EXPOSUREPROGRAM: int +CAP_PROP_VIEWFINDER: int +CAP_PROP_IMAGES_BASE: int +CAP_PROP_IMAGES_LAST: int +LMEDS: int +RANSAC: int +RHO: int +USAC_DEFAULT: int +USAC_PARALLEL: int +USAC_FM_8PTS: int +USAC_FAST: int +USAC_ACCURATE: int +USAC_PROSAC: int +USAC_MAGSAC: int +CALIB_CB_ADAPTIVE_THRESH: int +CALIB_CB_NORMALIZE_IMAGE: int +CALIB_CB_FILTER_QUADS: int +CALIB_CB_FAST_CHECK: int +CALIB_CB_EXHAUSTIVE: int +CALIB_CB_ACCURACY: int +CALIB_CB_LARGER: int +CALIB_CB_MARKER: int +CALIB_CB_SYMMETRIC_GRID: int +CALIB_CB_ASYMMETRIC_GRID: int +CALIB_CB_CLUSTERING: int +CALIB_NINTRINSIC: int +CALIB_USE_INTRINSIC_GUESS: int +CALIB_FIX_ASPECT_RATIO: int +CALIB_FIX_PRINCIPAL_POINT: int +CALIB_ZERO_TANGENT_DIST: int +CALIB_FIX_FOCAL_LENGTH: int +CALIB_FIX_K1: int +CALIB_FIX_K2: int +CALIB_FIX_K3: int +CALIB_FIX_K4: int +CALIB_FIX_K5: int +CALIB_FIX_K6: int +CALIB_RATIONAL_MODEL: int +CALIB_THIN_PRISM_MODEL: int +CALIB_FIX_S1_S2_S3_S4: int +CALIB_TILTED_MODEL: int +CALIB_FIX_TAUX_TAUY: int +CALIB_USE_QR: int +CALIB_FIX_TANGENT_DIST: int +CALIB_FIX_INTRINSIC: int +CALIB_SAME_FOCAL_LENGTH: int +CALIB_ZERO_DISPARITY: int +CALIB_USE_LU: int +CALIB_USE_EXTRINSIC_GUESS: int +FM_7POINT: int +FM_8POINT: int +FM_LMEDS: int +FM_RANSAC: int 
+CASCADE_DO_CANNY_PRUNING: int +CASCADE_SCALE_IMAGE: int +CASCADE_FIND_BIGGEST_OBJECT: int +CASCADE_DO_ROUGH_SEARCH: int +OPTFLOW_USE_INITIAL_FLOW: int +OPTFLOW_LK_GET_MIN_EIGENVALS: int +OPTFLOW_FARNEBACK_GAUSSIAN: int +MOTION_TRANSLATION: int +MOTION_EUCLIDEAN: int +MOTION_AFFINE: int +MOTION_HOMOGRAPHY: int + +DrawMatchesFlags_DEFAULT: int +DrawMatchesFlags_DRAW_OVER_OUTIMG: int +DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int +DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int +DrawMatchesFlags = int # One of [DEFAULT, DRAW_OVER_OUTIMG, NOT_DRAW_SINGLE_POINTS, DRAW_RICH_KEYPOINTS] + +IMREAD_UNCHANGED: int +IMREAD_GRAYSCALE: int +IMREAD_COLOR: int +IMREAD_ANYDEPTH: int +IMREAD_ANYCOLOR: int +IMREAD_LOAD_GDAL: int +IMREAD_REDUCED_GRAYSCALE_2: int +IMREAD_REDUCED_COLOR_2: int +IMREAD_REDUCED_GRAYSCALE_4: int +IMREAD_REDUCED_COLOR_4: int +IMREAD_REDUCED_GRAYSCALE_8: int +IMREAD_REDUCED_COLOR_8: int +IMREAD_IGNORE_ORIENTATION: int +ImreadModes = int # One of [IMREAD_UNCHANGED, IMREAD_GRAYSCALE, IMREAD_COLOR, IMREAD_ANYDEPTH, IMREAD_ANYCOLOR, +# IMREAD_LOAD_GDAL, IMREAD_REDUCED_GRAYSCALE_2, IMREAD_REDUCED_COLOR_2, IMREAD_REDUCED_GRAYSCALE_4, +# IMREAD_REDUCED_COLOR_4, IMREAD_REDUCED_GRAYSCALE_8, IMREAD_REDUCED_COLOR_8, IMREAD_IGNORE_ORIENTATION] + +IMWRITE_JPEG_QUALITY: int +IMWRITE_JPEG_PROGRESSIVE: int +IMWRITE_JPEG_OPTIMIZE: int +IMWRITE_JPEG_RST_INTERVAL: int +IMWRITE_JPEG_LUMA_QUALITY: int +IMWRITE_JPEG_CHROMA_QUALITY: int +IMWRITE_JPEG_SAMPLING_FACTOR: int +IMWRITE_PNG_COMPRESSION: int +IMWRITE_PNG_STRATEGY: int +IMWRITE_PNG_BILEVEL: int +IMWRITE_PXM_BINARY: int +IMWRITE_EXR_TYPE: int +IMWRITE_EXR_COMPRESSION: int +IMWRITE_EXR_DWA_COMPRESSION_LEVEL: int +IMWRITE_WEBP_QUALITY: int +IMWRITE_HDR_COMPRESSION: int +IMWRITE_PAM_TUPLETYPE: int +IMWRITE_TIFF_RESUNIT: int +IMWRITE_TIFF_XDPI: int +IMWRITE_TIFF_YDPI: int +IMWRITE_TIFF_COMPRESSION: int +IMWRITE_JPEG2000_COMPRESSION_X1000: int +ImwriteFlags = int # One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, +# IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, +# IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, +# IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, +# IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, +# IMWRITE_JPEG2000_COMPRESSION_X1000] + +IMWRITE_JPEG_SAMPLING_FACTOR_411: int +IMWRITE_JPEG_SAMPLING_FACTOR_420: int +IMWRITE_JPEG_SAMPLING_FACTOR_422: int +IMWRITE_JPEG_SAMPLING_FACTOR_440: int +IMWRITE_JPEG_SAMPLING_FACTOR_444: int +# One of [IMWRITE_JPEG_SAMPLING_FACTOR_411, +# IMWRITE_JPEG_SAMPLING_FACTOR_420, IMWRITE_JPEG_SAMPLING_FACTOR_422, +# IMWRITE_JPEG_SAMPLING_FACTOR_440, IMWRITE_JPEG_SAMPLING_FACTOR_444] +ImwriteJPEGSamplingFactorParams = int + +IMWRITE_EXR_TYPE_HALF: int +IMWRITE_EXR_TYPE_FLOAT: int +ImwriteEXRTypeFlags = int # One of [IMWRITE_EXR_TYPE_HALF, IMWRITE_EXR_TYPE_FLOAT] + +IMWRITE_EXR_COMPRESSION_NO: int +IMWRITE_EXR_COMPRESSION_RLE: int +IMWRITE_EXR_COMPRESSION_ZIPS: int +IMWRITE_EXR_COMPRESSION_ZIP: int +IMWRITE_EXR_COMPRESSION_PIZ: int +IMWRITE_EXR_COMPRESSION_PXR24: int +IMWRITE_EXR_COMPRESSION_B44: int +IMWRITE_EXR_COMPRESSION_B44A: int +IMWRITE_EXR_COMPRESSION_DWAA: int +IMWRITE_EXR_COMPRESSION_DWAB: int +# One of [IMWRITE_EXR_COMPRESSION_NO, IMWRITE_EXR_COMPRESSION_RLE, +# IMWRITE_EXR_COMPRESSION_ZIPS, IMWRITE_EXR_COMPRESSION_ZIP, +# 
IMWRITE_EXR_COMPRESSION_PIZ, IMWRITE_EXR_COMPRESSION_PXR24, +# IMWRITE_EXR_COMPRESSION_B44, IMWRITE_EXR_COMPRESSION_B44A, +# IMWRITE_EXR_COMPRESSION_DWAA, IMWRITE_EXR_COMPRESSION_DWAB] +ImwriteEXRCompressionFlags = int + +IMWRITE_PNG_STRATEGY_DEFAULT: int +IMWRITE_PNG_STRATEGY_FILTERED: int +IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int +IMWRITE_PNG_STRATEGY_RLE: int +IMWRITE_PNG_STRATEGY_FIXED: int +# One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, +# IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, IMWRITE_PNG_STRATEGY_RLE, +# IMWRITE_PNG_STRATEGY_FIXED] +ImwritePNGFlags = int + +IMWRITE_PAM_FORMAT_NULL: int +IMWRITE_PAM_FORMAT_BLACKANDWHITE: int +IMWRITE_PAM_FORMAT_GRAYSCALE: int +IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int +IMWRITE_PAM_FORMAT_RGB: int +IMWRITE_PAM_FORMAT_RGB_ALPHA: int +# One of [IMWRITE_PAM_FORMAT_NULL, IMWRITE_PAM_FORMAT_BLACKANDWHITE, +# IMWRITE_PAM_FORMAT_GRAYSCALE, IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA, +# IMWRITE_PAM_FORMAT_RGB, IMWRITE_PAM_FORMAT_RGB_ALPHA] +ImwritePAMFlags = int + +IMWRITE_HDR_COMPRESSION_NONE: int +IMWRITE_HDR_COMPRESSION_RLE: int +ImwriteHDRCompressionFlags = int # One of [IMWRITE_HDR_COMPRESSION_NONE, IMWRITE_HDR_COMPRESSION_RLE] + +CAP_ANY: int +CAP_VFW: int +CAP_V4L: int +CAP_V4L2: int +CAP_FIREWIRE: int +CAP_FIREWARE: int +CAP_IEEE1394: int +CAP_DC1394: int +CAP_CMU1394: int +CAP_QT: int +CAP_UNICAP: int +CAP_DSHOW: int +CAP_PVAPI: int +CAP_OPENNI: int +CAP_OPENNI_ASUS: int +CAP_ANDROID: int +CAP_XIAPI: int +CAP_AVFOUNDATION: int +CAP_GIGANETIX: int +CAP_MSMF: int +CAP_WINRT: int +CAP_INTELPERC: int +CAP_REALSENSE: int +CAP_OPENNI2: int +CAP_OPENNI2_ASUS: int +CAP_OPENNI2_ASTRA: int +CAP_GPHOTO2: int +CAP_GSTREAMER: int +CAP_FFMPEG: int +CAP_IMAGES: int +CAP_ARAVIS: int +CAP_OPENCV_MJPEG: int +CAP_INTEL_MFX: int +CAP_XINE: int +CAP_UEYE: int +CAP_OBSENSOR: int +VideoCaptureAPIs = int # One of [CAP_ANY, CAP_VFW, CAP_V4L, CAP_V4L2, CAP_FIREWIRE, CAP_FIREWARE, CAP_IEEE1394, +# CAP_DC1394, CAP_CMU1394, CAP_QT, CAP_UNICAP, CAP_DSHOW, CAP_PVAPI, CAP_OPENNI, CAP_OPENNI_ASUS, CAP_ANDROID, +# CAP_XIAPI, CAP_AVFOUNDATION, CAP_GIGANETIX, CAP_MSMF, CAP_WINRT, CAP_INTELPERC, CAP_REALSENSE, CAP_OPENNI2, +# CAP_OPENNI2_ASUS, CAP_OPENNI2_ASTRA, CAP_GPHOTO2, CAP_GSTREAMER, CAP_FFMPEG, CAP_IMAGES, CAP_ARAVIS, CAP_OPENCV_MJPEG, +# CAP_INTEL_MFX, CAP_XINE, CAP_UEYE, CAP_OBSENSOR] + +CAP_PROP_POS_MSEC: int +CAP_PROP_POS_FRAMES: int +CAP_PROP_POS_AVI_RATIO: int +CAP_PROP_FRAME_WIDTH: int +CAP_PROP_FRAME_HEIGHT: int +CAP_PROP_FPS: int +CAP_PROP_FOURCC: int +CAP_PROP_FRAME_COUNT: int +CAP_PROP_FORMAT: int +CAP_PROP_MODE: int +CAP_PROP_BRIGHTNESS: int +CAP_PROP_CONTRAST: int +CAP_PROP_SATURATION: int +CAP_PROP_HUE: int +CAP_PROP_GAIN: int +CAP_PROP_EXPOSURE: int +CAP_PROP_CONVERT_RGB: int +CAP_PROP_WHITE_BALANCE_BLUE_U: int +CAP_PROP_RECTIFICATION: int +CAP_PROP_MONOCHROME: int +CAP_PROP_SHARPNESS: int +CAP_PROP_AUTO_EXPOSURE: int +CAP_PROP_GAMMA: int +CAP_PROP_TEMPERATURE: int +CAP_PROP_TRIGGER: int +CAP_PROP_TRIGGER_DELAY: int +CAP_PROP_WHITE_BALANCE_RED_V: int +CAP_PROP_ZOOM: int +CAP_PROP_FOCUS: int +CAP_PROP_GUID: int +CAP_PROP_ISO_SPEED: int +CAP_PROP_BACKLIGHT: int +CAP_PROP_PAN: int +CAP_PROP_TILT: int +CAP_PROP_ROLL: int +CAP_PROP_IRIS: int +CAP_PROP_SETTINGS: int +CAP_PROP_BUFFERSIZE: int +CAP_PROP_AUTOFOCUS: int +CAP_PROP_SAR_NUM: int +CAP_PROP_SAR_DEN: int +CAP_PROP_BACKEND: int +CAP_PROP_CHANNEL: int +CAP_PROP_AUTO_WB: int +CAP_PROP_WB_TEMPERATURE: int +CAP_PROP_CODEC_PIXEL_FORMAT: int +CAP_PROP_BITRATE: int +CAP_PROP_ORIENTATION_META: int 
+CAP_PROP_ORIENTATION_AUTO: int +CAP_PROP_HW_ACCELERATION: int +CAP_PROP_HW_DEVICE: int +CAP_PROP_HW_ACCELERATION_USE_OPENCL: int +CAP_PROP_OPEN_TIMEOUT_MSEC: int +CAP_PROP_READ_TIMEOUT_MSEC: int +CAP_PROP_STREAM_OPEN_TIME_USEC: int +CAP_PROP_VIDEO_TOTAL_CHANNELS: int +CAP_PROP_VIDEO_STREAM: int +CAP_PROP_AUDIO_STREAM: int +CAP_PROP_AUDIO_POS: int +CAP_PROP_AUDIO_SHIFT_NSEC: int +CAP_PROP_AUDIO_DATA_DEPTH: int +CAP_PROP_AUDIO_SAMPLES_PER_SECOND: int +CAP_PROP_AUDIO_BASE_INDEX: int +CAP_PROP_AUDIO_TOTAL_CHANNELS: int +CAP_PROP_AUDIO_TOTAL_STREAMS: int +CAP_PROP_AUDIO_SYNCHRONIZE: int +CAP_PROP_LRF_HAS_KEY_FRAME: int +CAP_PROP_CODEC_EXTRADATA_INDEX: int +CAP_PROP_FRAME_TYPE: int +CAP_PROP_N_THREADS: int +VideoCaptureProperties = int # One of [CAP_PROP_POS_MSEC, CAP_PROP_POS_FRAMES, CAP_PROP_POS_AVI_RATIO, +# CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT, CAP_PROP_FPS, CAP_PROP_FOURCC, CAP_PROP_FRAME_COUNT, CAP_PROP_FORMAT, +# CAP_PROP_MODE, CAP_PROP_BRIGHTNESS, CAP_PROP_CONTRAST, CAP_PROP_SATURATION, CAP_PROP_HUE, CAP_PROP_GAIN, +# CAP_PROP_EXPOSURE, CAP_PROP_CONVERT_RGB, CAP_PROP_WHITE_BALANCE_BLUE_U, CAP_PROP_RECTIFICATION, CAP_PROP_MONOCHROME, +# CAP_PROP_SHARPNESS, CAP_PROP_AUTO_EXPOSURE, CAP_PROP_GAMMA, CAP_PROP_TEMPERATURE, CAP_PROP_TRIGGER, +# CAP_PROP_TRIGGER_DELAY, CAP_PROP_WHITE_BALANCE_RED_V, CAP_PROP_ZOOM, CAP_PROP_FOCUS, CAP_PROP_GUID, +# CAP_PROP_ISO_SPEED, CAP_PROP_BACKLIGHT, CAP_PROP_PAN, CAP_PROP_TILT, CAP_PROP_ROLL, CAP_PROP_IRIS, CAP_PROP_SETTINGS, +# CAP_PROP_BUFFERSIZE, CAP_PROP_AUTOFOCUS, CAP_PROP_SAR_NUM, CAP_PROP_SAR_DEN, CAP_PROP_BACKEND, CAP_PROP_CHANNEL, +# CAP_PROP_AUTO_WB, CAP_PROP_WB_TEMPERATURE, CAP_PROP_CODEC_PIXEL_FORMAT, CAP_PROP_BITRATE, CAP_PROP_ORIENTATION_META, +# CAP_PROP_ORIENTATION_AUTO, CAP_PROP_HW_ACCELERATION, CAP_PROP_HW_DEVICE, CAP_PROP_HW_ACCELERATION_USE_OPENCL, +# CAP_PROP_OPEN_TIMEOUT_MSEC, CAP_PROP_READ_TIMEOUT_MSEC, CAP_PROP_STREAM_OPEN_TIME_USEC, CAP_PROP_VIDEO_TOTAL_CHANNELS, +# CAP_PROP_VIDEO_STREAM, CAP_PROP_AUDIO_STREAM, CAP_PROP_AUDIO_POS, CAP_PROP_AUDIO_SHIFT_NSEC, +# CAP_PROP_AUDIO_DATA_DEPTH, CAP_PROP_AUDIO_SAMPLES_PER_SECOND, CAP_PROP_AUDIO_BASE_INDEX, +# CAP_PROP_AUDIO_TOTAL_CHANNELS, CAP_PROP_AUDIO_TOTAL_STREAMS, CAP_PROP_AUDIO_SYNCHRONIZE, CAP_PROP_LRF_HAS_KEY_FRAME, +# CAP_PROP_CODEC_EXTRADATA_INDEX, CAP_PROP_FRAME_TYPE, CAP_PROP_N_THREADS] + +VIDEOWRITER_PROP_QUALITY: int +VIDEOWRITER_PROP_FRAMEBYTES: int +VIDEOWRITER_PROP_NSTRIPES: int +VIDEOWRITER_PROP_IS_COLOR: int +VIDEOWRITER_PROP_DEPTH: int +VIDEOWRITER_PROP_HW_ACCELERATION: int +VIDEOWRITER_PROP_HW_DEVICE: int +VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int +# One of [VIDEOWRITER_PROP_QUALITY, VIDEOWRITER_PROP_FRAMEBYTES, +# VIDEOWRITER_PROP_NSTRIPES, VIDEOWRITER_PROP_IS_COLOR, +# VIDEOWRITER_PROP_DEPTH, VIDEOWRITER_PROP_HW_ACCELERATION, +# VIDEOWRITER_PROP_HW_DEVICE, VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL] +VideoWriterProperties = int + +VIDEO_ACCELERATION_NONE: int +VIDEO_ACCELERATION_ANY: int +VIDEO_ACCELERATION_D3D11: int +VIDEO_ACCELERATION_VAAPI: int +VIDEO_ACCELERATION_MFX: int +# One of [VIDEO_ACCELERATION_NONE, VIDEO_ACCELERATION_ANY, +# VIDEO_ACCELERATION_D3D11, VIDEO_ACCELERATION_VAAPI, +# VIDEO_ACCELERATION_MFX] +VideoAccelerationType = int + +CAP_OBSENSOR_DEPTH_MAP: int +CAP_OBSENSOR_BGR_IMAGE: int +CAP_OBSENSOR_IR_IMAGE: int +VideoCaptureOBSensorDataType = int # One of [CAP_OBSENSOR_DEPTH_MAP, CAP_OBSENSOR_BGR_IMAGE, CAP_OBSENSOR_IR_IMAGE] + +CAP_OBSENSOR_DEPTH_GENERATOR: int +CAP_OBSENSOR_IMAGE_GENERATOR: int +CAP_OBSENSOR_IR_GENERATOR: int 
+CAP_OBSENSOR_GENERATORS_MASK: int +# One of [CAP_OBSENSOR_DEPTH_GENERATOR, CAP_OBSENSOR_IMAGE_GENERATOR, +# CAP_OBSENSOR_IR_GENERATOR, CAP_OBSENSOR_GENERATORS_MASK] +VideoCaptureOBSensorGenerators = int + +CAP_PROP_OBSENSOR_INTRINSIC_FX: int +CAP_PROP_OBSENSOR_INTRINSIC_FY: int +CAP_PROP_OBSENSOR_INTRINSIC_CX: int +CAP_PROP_OBSENSOR_INTRINSIC_CY: int +# One of [CAP_PROP_OBSENSOR_INTRINSIC_FX, CAP_PROP_OBSENSOR_INTRINSIC_FY, +# CAP_PROP_OBSENSOR_INTRINSIC_CX, CAP_PROP_OBSENSOR_INTRINSIC_CY] +VideoCaptureOBSensorProperties = int + +SOLVEPNP_ITERATIVE: int +SOLVEPNP_EPNP: int +SOLVEPNP_P3P: int +SOLVEPNP_DLS: int +SOLVEPNP_UPNP: int +SOLVEPNP_AP3P: int +SOLVEPNP_IPPE: int +SOLVEPNP_IPPE_SQUARE: int +SOLVEPNP_SQPNP: int +SOLVEPNP_MAX_COUNT: int +# One of [SOLVEPNP_ITERATIVE, SOLVEPNP_EPNP, SOLVEPNP_P3P, SOLVEPNP_DLS, +# SOLVEPNP_UPNP, SOLVEPNP_AP3P, SOLVEPNP_IPPE, SOLVEPNP_IPPE_SQUARE, +# SOLVEPNP_SQPNP, SOLVEPNP_MAX_COUNT] +SolvePnPMethod = int + +CALIB_HAND_EYE_TSAI: int +CALIB_HAND_EYE_PARK: int +CALIB_HAND_EYE_HORAUD: int +CALIB_HAND_EYE_ANDREFF: int +CALIB_HAND_EYE_DANIILIDIS: int +# One of [CALIB_HAND_EYE_TSAI, CALIB_HAND_EYE_PARK, CALIB_HAND_EYE_HORAUD, +# CALIB_HAND_EYE_ANDREFF, CALIB_HAND_EYE_DANIILIDIS] +HandEyeCalibrationMethod = int + +CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int +CALIB_ROBOT_WORLD_HAND_EYE_LI: int +RobotWorldHandEyeCalibrationMethod = int # One of [CALIB_ROBOT_WORLD_HAND_EYE_SHAH, CALIB_ROBOT_WORLD_HAND_EYE_LI] + +SAMPLING_UNIFORM: int +SAMPLING_PROGRESSIVE_NAPSAC: int +SAMPLING_NAPSAC: int +SAMPLING_PROSAC: int +SamplingMethod = int # One of [SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, SAMPLING_PROSAC] + +LOCAL_OPTIM_NULL: int +LOCAL_OPTIM_INNER_LO: int +LOCAL_OPTIM_INNER_AND_ITER_LO: int +LOCAL_OPTIM_GC: int +LOCAL_OPTIM_SIGMA: int +# One of [LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA] +LocalOptimMethod = int + +SCORE_METHOD_RANSAC: int +SCORE_METHOD_MSAC: int +SCORE_METHOD_MAGSAC: int +SCORE_METHOD_LMEDS: int +ScoreMethod = int # One of [SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS] + +NEIGH_FLANN_KNN: int +NEIGH_GRID: int +NEIGH_FLANN_RADIUS: int +NeighborSearchMethod = int # One of [NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS] + +PROJ_SPHERICAL_ORTHO: int +PROJ_SPHERICAL_EQRECT: int +UndistortTypes = int # One of [PROJ_SPHERICAL_ORTHO, PROJ_SPHERICAL_EQRECT] + +WINDOW_NORMAL: int +WINDOW_AUTOSIZE: int +WINDOW_OPENGL: int +WINDOW_FULLSCREEN: int +WINDOW_FREERATIO: int +WINDOW_KEEPRATIO: int +WINDOW_GUI_EXPANDED: int +WINDOW_GUI_NORMAL: int +# One of [WINDOW_NORMAL, WINDOW_AUTOSIZE, WINDOW_OPENGL, +# WINDOW_FULLSCREEN, WINDOW_FREERATIO, WINDOW_KEEPRATIO, +# WINDOW_GUI_EXPANDED, WINDOW_GUI_NORMAL] +WindowFlags = int + +WND_PROP_FULLSCREEN: int +WND_PROP_AUTOSIZE: int +WND_PROP_ASPECT_RATIO: int +WND_PROP_OPENGL: int +WND_PROP_VISIBLE: int +WND_PROP_TOPMOST: int +WND_PROP_VSYNC: int +# One of [WND_PROP_FULLSCREEN, WND_PROP_AUTOSIZE, WND_PROP_ASPECT_RATIO, +# WND_PROP_OPENGL, WND_PROP_VISIBLE, WND_PROP_TOPMOST, WND_PROP_VSYNC] +WindowPropertyFlags = int + +EVENT_MOUSEMOVE: int +EVENT_LBUTTONDOWN: int +EVENT_RBUTTONDOWN: int +EVENT_MBUTTONDOWN: int +EVENT_LBUTTONUP: int +EVENT_RBUTTONUP: int +EVENT_MBUTTONUP: int +EVENT_LBUTTONDBLCLK: int +EVENT_RBUTTONDBLCLK: int +EVENT_MBUTTONDBLCLK: int +EVENT_MOUSEWHEEL: int +EVENT_MOUSEHWHEEL: int +# One of [EVENT_MOUSEMOVE, EVENT_LBUTTONDOWN, EVENT_RBUTTONDOWN, +# EVENT_MBUTTONDOWN, EVENT_LBUTTONUP, 
EVENT_RBUTTONUP, EVENT_MBUTTONUP, +# EVENT_LBUTTONDBLCLK, EVENT_RBUTTONDBLCLK, EVENT_MBUTTONDBLCLK, +# EVENT_MOUSEWHEEL, EVENT_MOUSEHWHEEL] +MouseEventTypes = int + +EVENT_FLAG_LBUTTON: int +EVENT_FLAG_RBUTTON: int +EVENT_FLAG_MBUTTON: int +EVENT_FLAG_CTRLKEY: int +EVENT_FLAG_SHIFTKEY: int +EVENT_FLAG_ALTKEY: int +# One of [EVENT_FLAG_LBUTTON, EVENT_FLAG_RBUTTON, EVENT_FLAG_MBUTTON, +# EVENT_FLAG_CTRLKEY, EVENT_FLAG_SHIFTKEY, EVENT_FLAG_ALTKEY] +MouseEventFlags = int + +QT_FONT_LIGHT: int +QT_FONT_NORMAL: int +QT_FONT_DEMIBOLD: int +QT_FONT_BOLD: int +QT_FONT_BLACK: int +QtFontWeights = int # One of [QT_FONT_LIGHT, QT_FONT_NORMAL, QT_FONT_DEMIBOLD, QT_FONT_BOLD, QT_FONT_BLACK] + +QT_STYLE_NORMAL: int +QT_STYLE_ITALIC: int +QT_STYLE_OBLIQUE: int +QtFontStyles = int # One of [QT_STYLE_NORMAL, QT_STYLE_ITALIC, QT_STYLE_OBLIQUE] + +QT_PUSH_BUTTON: int +QT_CHECKBOX: int +QT_RADIOBOX: int +QT_NEW_BUTTONBAR: int +QtButtonTypes = int # One of [QT_PUSH_BUTTON, QT_CHECKBOX, QT_RADIOBOX, QT_NEW_BUTTONBAR] + +GShape_GMAT: int +GShape_GSCALAR: int +GShape_GARRAY: int +GShape_GOPAQUE: int +GShape_GFRAME: int +GShape = int # One of [GMAT, GSCALAR, GARRAY, GOPAQUE, GFRAME] + +MediaFormat_BGR: int +MediaFormat_NV12: int +MediaFormat_GRAY: int +MediaFormat = int # One of [BGR, NV12, GRAY] + + +FileStorage_READ: int +FileStorage_WRITE: int +FileStorage_APPEND: int +FileStorage_MEMORY: int +FileStorage_FORMAT_MASK: int +FileStorage_FORMAT_AUTO: int +FileStorage_FORMAT_XML: int +FileStorage_FORMAT_YAML: int +FileStorage_FORMAT_JSON: int +FileStorage_BASE64: int +FileStorage_WRITE_BASE64: int +# One of [READ, WRITE, APPEND, MEMORY, FORMAT_MASK, FORMAT_AUTO, +# FORMAT_XML, FORMAT_YAML, FORMAT_JSON, BASE64, WRITE_BASE64] +FileStorage_Mode = int + +FileStorage_UNDEFINED: int +FileStorage_VALUE_EXPECTED: int +FileStorage_NAME_EXPECTED: int +FileStorage_INSIDE_MAP: int +FileStorage_State = int # One of [UNDEFINED, VALUE_EXPECTED, NAME_EXPECTED, INSIDE_MAP] + +FileNode_NONE: int +FileNode_INT: int +FileNode_REAL: int +FileNode_FLOAT: int +FileNode_STR: int +FileNode_STRING: int +FileNode_SEQ: int +FileNode_MAP: int +FileNode_TYPE_MASK: int +FileNode_FLOW: int +FileNode_UNIFORM: int +FileNode_EMPTY: int +FileNode_NAMED: int + +UMat_MAGIC_VAL: int +UMat_AUTO_STEP: int +UMat_CONTINUOUS_FLAG: int +UMat_SUBMATRIX_FLAG: int +UMat_MAGIC_MASK: int +UMat_TYPE_MASK: int +UMat_DEPTH_MASK: int + +Subdiv2D_PTLOC_ERROR: int +Subdiv2D_PTLOC_OUTSIDE_RECT: int +Subdiv2D_PTLOC_INSIDE: int +Subdiv2D_PTLOC_VERTEX: int +Subdiv2D_PTLOC_ON_EDGE: int +Subdiv2D_NEXT_AROUND_ORG: int +Subdiv2D_NEXT_AROUND_DST: int +Subdiv2D_PREV_AROUND_ORG: int +Subdiv2D_PREV_AROUND_DST: int +Subdiv2D_NEXT_AROUND_LEFT: int +Subdiv2D_NEXT_AROUND_RIGHT: int +Subdiv2D_PREV_AROUND_LEFT: int +Subdiv2D_PREV_AROUND_RIGHT: int + +ORB_HARRIS_SCORE: int +ORB_FAST_SCORE: int +ORB_ScoreType = int # One of [HARRIS_SCORE, FAST_SCORE] + +FastFeatureDetector_TYPE_5_8: int +FastFeatureDetector_TYPE_7_12: int +FastFeatureDetector_TYPE_9_16: int +FastFeatureDetector_DetectorType = int # One of [TYPE_5_8, TYPE_7_12, TYPE_9_16] + +FastFeatureDetector_THRESHOLD: int +FastFeatureDetector_NONMAX_SUPPRESSION: int +FastFeatureDetector_FAST_N: int + +AgastFeatureDetector_AGAST_5_8: int +AgastFeatureDetector_AGAST_7_12d: int +AgastFeatureDetector_AGAST_7_12s: int +AgastFeatureDetector_OAST_9_16: int +AgastFeatureDetector_DetectorType = int # One of [AGAST_5_8, AGAST_7_12d, AGAST_7_12s, OAST_9_16] + +AgastFeatureDetector_THRESHOLD: int +AgastFeatureDetector_NONMAX_SUPPRESSION: int + 
+KAZE_DIFF_PM_G1: int +KAZE_DIFF_PM_G2: int +KAZE_DIFF_WEICKERT: int +KAZE_DIFF_CHARBONNIER: int +KAZE_DiffusivityType = int # One of [DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT, DIFF_CHARBONNIER] + +AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int +AKAZE_DESCRIPTOR_KAZE: int +AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int +AKAZE_DESCRIPTOR_MLDB: int +# One of [DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_KAZE, DESCRIPTOR_MLDB_UPRIGHT, DESCRIPTOR_MLDB] +AKAZE_DescriptorType = int + +DescriptorMatcher_FLANNBASED: int +DescriptorMatcher_BRUTEFORCE: int +DescriptorMatcher_BRUTEFORCE_L1: int +DescriptorMatcher_BRUTEFORCE_HAMMING: int +DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int +DescriptorMatcher_BRUTEFORCE_SL2: int +# One of [FLANNBASED, BRUTEFORCE, BRUTEFORCE_L1, BRUTEFORCE_HAMMING, BRUTEFORCE_HAMMINGLUT, BRUTEFORCE_SL2] +DescriptorMatcher_MatcherType = int + +CirclesGridFinderParameters_SYMMETRIC_GRID: int +CirclesGridFinderParameters_ASYMMETRIC_GRID: int +CirclesGridFinderParameters_GridType = int # One of [SYMMETRIC_GRID, ASYMMETRIC_GRID] + +StereoMatcher_DISP_SHIFT: int +StereoMatcher_DISP_SCALE: int + +StereoBM_PREFILTER_NORMALIZED_RESPONSE: int +StereoBM_PREFILTER_XSOBEL: int + +StereoSGBM_MODE_SGBM: int +StereoSGBM_MODE_HH: int +StereoSGBM_MODE_SGBM_3WAY: int +StereoSGBM_MODE_HH4: int + +HOGDescriptor_L2Hys: int +HOGDescriptor_HistogramNormType = int # One of [L2Hys] + +HOGDescriptor_DEFAULT_NLEVELS: int + +HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int +HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int +HOGDescriptor_DescriptorStorageFormat = int # One of [DESCR_FORMAT_COL_BY_COL, DESCR_FORMAT_ROW_BY_ROW] + +QRCodeEncoder_MODE_AUTO: int +QRCodeEncoder_MODE_NUMERIC: int +QRCodeEncoder_MODE_ALPHANUMERIC: int +QRCodeEncoder_MODE_BYTE: int +QRCodeEncoder_MODE_ECI: int +QRCodeEncoder_MODE_KANJI: int +QRCodeEncoder_MODE_STRUCTURED_APPEND: int +# One of [MODE_AUTO, MODE_NUMERIC, MODE_ALPHANUMERIC, MODE_BYTE, MODE_ECI, MODE_KANJI, MODE_STRUCTURED_APPEND] +QRCodeEncoder_EncodeMode = int + +QRCodeEncoder_CORRECT_LEVEL_L: int +QRCodeEncoder_CORRECT_LEVEL_M: int +QRCodeEncoder_CORRECT_LEVEL_Q: int +QRCodeEncoder_CORRECT_LEVEL_H: int +QRCodeEncoder_CorrectionLevel = int # One of [CORRECT_LEVEL_L, CORRECT_LEVEL_M, CORRECT_LEVEL_Q, CORRECT_LEVEL_H] + +QRCodeEncoder_ECI_UTF8: int +QRCodeEncoder_ECIEncodings = int # One of [ECI_UTF8] + +FaceRecognizerSF_FR_COSINE: int +FaceRecognizerSF_FR_NORM_L2: int +FaceRecognizerSF_DisType = int # One of [FR_COSINE, FR_NORM_L2] + +Stitcher_OK: int +Stitcher_ERR_NEED_MORE_IMGS: int +Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int +Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int +Stitcher_Status = int # One of [OK, ERR_NEED_MORE_IMGS, ERR_HOMOGRAPHY_EST_FAIL, ERR_CAMERA_PARAMS_ADJUST_FAIL] + +Stitcher_PANORAMA: int +Stitcher_SCANS: int +Stitcher_Mode = int # One of [PANORAMA, SCANS] + +DISOpticalFlow_PRESET_ULTRAFAST: int +DISOpticalFlow_PRESET_FAST: int +DISOpticalFlow_PRESET_MEDIUM: int + +PCA_DATA_AS_ROW: int +PCA_DATA_AS_COL: int +PCA_USE_AVG: int +PCA_Flags = int # One of [DATA_AS_ROW, DATA_AS_COL, USE_AVG] + +SVD_MODIFY_A: int +SVD_NO_UV: int +SVD_FULL_UV: int +SVD_Flags = int # One of [MODIFY_A, NO_UV, FULL_UV] + +RNG_UNIFORM: int +RNG_NORMAL: int + +Formatter_FMT_DEFAULT: int +Formatter_FMT_MATLAB: int +Formatter_FMT_CSV: int +Formatter_FMT_PYTHON: int +Formatter_FMT_NUMPY: int +Formatter_FMT_C: int +Formatter_FormatType = int # One of [FMT_DEFAULT, FMT_MATLAB, FMT_CSV, FMT_PYTHON, FMT_NUMPY, FMT_C] + +_InputArray_KIND_SHIFT: int +_InputArray_FIXED_TYPE: int +_InputArray_FIXED_SIZE: int +_InputArray_KIND_MASK: int 
+_InputArray_NONE: int +_InputArray_MAT: int +_InputArray_MATX: int +_InputArray_STD_VECTOR: int +_InputArray_STD_VECTOR_VECTOR: int +_InputArray_STD_VECTOR_MAT: int +_InputArray_EXPR: int +_InputArray_OPENGL_BUFFER: int +_InputArray_CUDA_HOST_MEM: int +_InputArray_CUDA_GPU_MAT: int +_InputArray_UMAT: int +_InputArray_STD_VECTOR_UMAT: int +_InputArray_STD_BOOL_VECTOR: int +_InputArray_STD_VECTOR_CUDA_GPU_MAT: int +_InputArray_STD_ARRAY: int +_InputArray_STD_ARRAY_MAT: int +# One of [KIND_SHIFT, FIXED_TYPE, FIXED_SIZE, KIND_MASK, NONE, MAT, MATX, +# STD_VECTOR, STD_VECTOR_VECTOR, STD_VECTOR_MAT, EXPR, OPENGL_BUFFER, +# CUDA_HOST_MEM, CUDA_GPU_MAT, UMAT, STD_VECTOR_UMAT, STD_BOOL_VECTOR, +# STD_VECTOR_CUDA_GPU_MAT, STD_ARRAY, STD_ARRAY_MAT] +_InputArray_KindFlag = int + +_OutputArray_DEPTH_MASK_8U: int +_OutputArray_DEPTH_MASK_8S: int +_OutputArray_DEPTH_MASK_16U: int +_OutputArray_DEPTH_MASK_16S: int +_OutputArray_DEPTH_MASK_32S: int +_OutputArray_DEPTH_MASK_32F: int +_OutputArray_DEPTH_MASK_64F: int +_OutputArray_DEPTH_MASK_16F: int +_OutputArray_DEPTH_MASK_ALL: int +_OutputArray_DEPTH_MASK_ALL_BUT_8S: int +_OutputArray_DEPTH_MASK_ALL_16F: int +_OutputArray_DEPTH_MASK_FLT: int +# One of [DEPTH_MASK_8U, DEPTH_MASK_8S, DEPTH_MASK_16U, DEPTH_MASK_16S, +# DEPTH_MASK_32S, DEPTH_MASK_32F, DEPTH_MASK_64F, DEPTH_MASK_16F, +# DEPTH_MASK_ALL, DEPTH_MASK_ALL_BUT_8S, DEPTH_MASK_ALL_16F, +# DEPTH_MASK_FLT] +_OutputArray_DepthMask = int + +UMatData_COPY_ON_MAP: int +UMatData_HOST_COPY_OBSOLETE: int +UMatData_DEVICE_COPY_OBSOLETE: int +UMatData_TEMP_UMAT: int +UMatData_TEMP_COPIED_UMAT: int +UMatData_USER_ALLOCATED: int +UMatData_DEVICE_MEM_MAPPED: int +UMatData_ASYNC_CLEANUP: int +# One of [COPY_ON_MAP, HOST_COPY_OBSOLETE, DEVICE_COPY_OBSOLETE, +# TEMP_UMAT, TEMP_COPIED_UMAT, USER_ALLOCATED, DEVICE_MEM_MAPPED, +# ASYNC_CLEANUP] +UMatData_MemoryFlag = int + +Mat_MAGIC_VAL: int +Mat_AUTO_STEP: int +Mat_CONTINUOUS_FLAG: int +Mat_SUBMATRIX_FLAG: int +Mat_MAGIC_MASK: int +Mat_TYPE_MASK: int +Mat_DEPTH_MASK: int + +SparseMat_MAGIC_VAL: int +SparseMat_MAX_DIM: int +SparseMat_HASH_SCALE: int +SparseMat_HASH_BIT: int + +QuatEnum_INT_XYZ: int +QuatEnum_INT_XZY: int +QuatEnum_INT_YXZ: int +QuatEnum_INT_YZX: int +QuatEnum_INT_ZXY: int +QuatEnum_INT_ZYX: int +QuatEnum_INT_XYX: int +QuatEnum_INT_XZX: int +QuatEnum_INT_YXY: int +QuatEnum_INT_YZY: int +QuatEnum_INT_ZXZ: int +QuatEnum_INT_ZYZ: int +QuatEnum_EXT_XYZ: int +QuatEnum_EXT_XZY: int +QuatEnum_EXT_YXZ: int +QuatEnum_EXT_YZX: int +QuatEnum_EXT_ZXY: int +QuatEnum_EXT_ZYX: int +QuatEnum_EXT_XYX: int +QuatEnum_EXT_XZX: int +QuatEnum_EXT_YXY: int +QuatEnum_EXT_YZY: int +QuatEnum_EXT_ZXZ: int +QuatEnum_EXT_ZYZ: int +QuatEnum_EULER_ANGLES_MAX_VALUE: int +# One of [INT_XYZ, INT_XZY, INT_YXZ, INT_YZX, INT_ZXY, INT_ZYX, INT_XYX, +# INT_XZX, INT_YXY, INT_YZY, INT_ZXZ, INT_ZYZ, EXT_XYZ, EXT_XZY, EXT_YXZ, +# EXT_YZX, EXT_ZXY, EXT_ZYX, EXT_XYX, EXT_XZX, EXT_YXY, EXT_YZY, EXT_ZXZ, +# EXT_ZYZ, EULER_ANGLES_MAX_VALUE] +QuatEnum_EulerAnglesType = int + +TermCriteria_COUNT: int +TermCriteria_MAX_ITER: int +TermCriteria_EPS: int +TermCriteria_Type = int # One of [COUNT, MAX_ITER, EPS] + +GFluidKernel_Kind_Filter: int +GFluidKernel_Kind_Resize: int +GFluidKernel_Kind_YUV420toRGB: int +GFluidKernel_Kind = int # One of [Filter, Resize, YUV420toRGB] + +MediaFrame_Access_R: int +MediaFrame_Access_W: int +MediaFrame_Access = int # One of [R, W] + +RMat_Access_R: int +RMat_Access_W: int +RMat_Access = int # One of [R, W] + + +# Classes +class Algorithm: + # Functions + def clear(self) -> 
None: ... + + @typing.overload + def write(self, fs: FileStorage) -> None: ... + @typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + def read(self, fn: FileNode) -> None: ... + + def empty(self) -> bool: ... + + def save(self, filename: str) -> None: ... + + def getDefaultName(self) -> str: ... + + +class AsyncArray: + # Functions + def __init__(self) -> None: ... + + def release(self) -> None: ... + + @typing.overload + def get(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @typing.overload + def get(self, dst: UMat | None = ...) -> UMat: ... + @typing.overload + def get(self, timeoutNs: float, dst: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @typing.overload + def get(self, timeoutNs: float, dst: UMat | None = ...) -> tuple[bool, UMat]: ... + + def wait_for(self, timeoutNs: float) -> bool: ... + + def valid(self) -> bool: ... + + +class FileStorage: + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, filename: str, flags: int, encoding: str = ...) -> None: ... + + def open(self, filename: str, flags: int, encoding: str = ...) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + def releaseAndGetString(self) -> str: ... + + def getFirstTopLevelNode(self) -> FileNode: ... + + def root(self, streamidx: int = ...) -> FileNode: ... + + def getNode(self, nodename: str) -> FileNode: ... + + @typing.overload + def write(self, name: str, val: int) -> None: ... + @typing.overload + def write(self, name: str, val: float) -> None: ... + @typing.overload + def write(self, name: str, val: str) -> None: ... + @typing.overload + def write(self, name: str, val: cv2.typing.MatLike) -> None: ... + @typing.overload + def write(self, name: str, val: typing.Sequence[str]) -> None: ... + + def writeComment(self, comment: str, append: bool = ...) -> None: ... + + def startWriteStruct(self, name: str, flags: int, typeName: str = ...) -> None: ... + + def endWriteStruct(self) -> None: ... + + def getFormat(self) -> int: ... + + +class FileNode: + # Functions + def __init__(self) -> None: ... + + def getNode(self, nodename: str) -> FileNode: ... + + def at(self, i: int) -> FileNode: ... + + def keys(self) -> typing.Sequence[str]: ... + + def type(self) -> int: ... + + def empty(self) -> bool: ... + + def isNone(self) -> bool: ... + + def isSeq(self) -> bool: ... + + def isMap(self) -> bool: ... + + def isInt(self) -> bool: ... + + def isReal(self) -> bool: ... + + def isString(self) -> bool: ... + + def isNamed(self) -> bool: ... + + def name(self) -> str: ... + + def size(self) -> int: ... + + def rawSize(self) -> int: ... + + def real(self) -> float: ... + + def string(self) -> str: ... + + def mat(self) -> cv2.typing.MatLike: ... + + +class KeyPoint: + pt: cv2.typing.Point2f + size: float + angle: float + response: float + octave: int + class_id: int + + # Functions + @typing.overload + def __init__(self) -> None: ... + + @typing.overload + def __init__( + self, + x: float, + y: float, + size: float, + angle: float = ..., + response: float = ..., + octave: int = ..., + class_id: int = ..., + ) -> None: ... + + @staticmethod + @typing.overload + def convert( + keypoints: typing.Sequence[KeyPoint], keypointIndexes: typing.Sequence[int] + = ..., + ) -> typing.Sequence[cv2.typing.Point2f]: ... 
+ + @staticmethod + @typing.overload + def convert( + points2f: typing.Sequence[cv2.typing.Point2f], + size: float = ..., + response: float = ..., + octave: int = ..., + class_id: int = ..., + ) -> typing.Sequence[KeyPoint]: ... + + @staticmethod + def overlap(kp1: KeyPoint, kp2: KeyPoint) -> float: ... + + +class DMatch: + queryIdx: int + trainIdx: int + imgIdx: int + distance: float + + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, _queryIdx: int, _trainIdx: int, _distance: float) -> None: ... + @typing.overload + def __init__(self, _queryIdx: int, _trainIdx: int, _imgIdx: int, _distance: float) -> None: ... + + +class TickMeter: + # Functions + def __init__(self) -> None: ... + + def start(self) -> None: ... + + def stop(self) -> None: ... + + def getTimeTicks(self) -> int: ... + + def getTimeMicro(self) -> float: ... + + def getTimeMilli(self) -> float: ... + + def getTimeSec(self) -> float: ... + + def getCounter(self) -> int: ... + + def getFPS(self) -> float: ... + + def getAvgTimeSec(self) -> float: ... + + def getAvgTimeMilli(self) -> float: ... + + def reset(self) -> None: ... + + +class UMat: + offset: int + + # Functions + @typing.overload + def __init__(self, usageFlags: UMatUsageFlags = ...) -> None: ... + @typing.overload + def __init__(self, rows: int, cols: int, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... + @typing.overload + def __init__(self, size: cv2.typing.Size, type: int, usageFlags: UMatUsageFlags = ...) -> None: ... + + @typing.overload + def __init__( + self, + rows: int, + cols: int, + type: int, + s: cv2.typing.Scalar, + usageFlags: UMatUsageFlags = ..., + ) -> None: ... + + @typing.overload + def __init__( + self, + size: cv2.typing.Size, + type: int, + s: cv2.typing.Scalar, + usageFlags: UMatUsageFlags = ..., + ) -> None: ... + + @typing.overload + def __init__(self, m: UMat) -> None: ... + @typing.overload + def __init__(self, m: UMat, rowRange: cv2.typing.Range, colRange: cv2.typing.Range = ...) -> None: ... + @typing.overload + def __init__(self, m: UMat, roi: cv2.typing.Rect) -> None: ... + @typing.overload + def __init__(self, m: UMat, ranges: typing.Sequence[cv2.typing.Range]) -> None: ... + + @staticmethod + def queue() -> cv2.typing.IntPointer: ... + + @staticmethod + def context() -> cv2.typing.IntPointer: ... + + def get(self) -> cv2.typing.MatLike: ... + + def isContinuous(self) -> bool: ... + + def isSubmatrix(self) -> bool: ... + + def handle(self, accessFlags: AccessFlag) -> cv2.typing.IntPointer: ... + + +class Subdiv2D: + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, rect: cv2.typing.Rect) -> None: ... + + def initDelaunay(self, rect: cv2.typing.Rect) -> None: ... + + @typing.overload + def insert(self, pt: cv2.typing.Point2f) -> int: ... + @typing.overload + def insert(self, ptvec: typing.Sequence[cv2.typing.Point2f]) -> None: ... + + def locate(self, pt: cv2.typing.Point2f) -> tuple[int, int, int]: ... + + def findNearest(self, pt: cv2.typing.Point2f) -> tuple[int, cv2.typing.Point2f]: ... + + def getEdgeList(self) -> typing.Sequence[cv2.typing.Vec4f]: ... + + def getLeadingEdgeList(self) -> typing.Sequence[int]: ... + + def getTriangleList(self) -> typing.Sequence[cv2.typing.Vec6f]: ... + + def getVoronoiFacetList( + self, + idx: typing.Sequence[int], + ) -> tuple[ + typing.Sequence[typing.Sequence[cv2.typing.Point2f]], + typing.Sequence[cv2.typing.Point2f], + ]: ... 
+ + def getVertex(self, vertex: int) -> tuple[cv2.typing.Point2f, int]: ... + + def getEdge(self, edge: int, nextEdgeType: int) -> int: ... + + def nextEdge(self, edge: int) -> int: ... + + def rotateEdge(self, edge: int, rotate: int) -> int: ... + + def symEdge(self, edge: int) -> int: ... + + def edgeOrg(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... + + def edgeDst(self, edge: int) -> tuple[int, cv2.typing.Point2f]: ... + + +class Feature2D: + # Functions + @typing.overload + def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> typing.Sequence[KeyPoint]: ... + @typing.overload + def detect(self, image: UMat, mask: UMat | None = ...) -> typing.Sequence[KeyPoint]: ... + + @typing.overload + def detect( + self, + images: typing.Sequence[cv2.typing.MatLike], + masks: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> typing.Sequence[typing.Sequence[KeyPoint]]: ... + + @typing.overload + def detect( + self, images: typing.Sequence[UMat], masks: typing.Sequence[UMat] + | None = ..., + ) -> typing.Sequence[typing.Sequence[KeyPoint]]: ... + + @typing.overload + def compute( + self, + image: cv2.typing.MatLike, + keypoints: typing.Sequence[KeyPoint], + descriptors: cv2.typing.MatLike | None = ..., + ) -> tuple[ + typing.Sequence[KeyPoint], + cv2.typing.MatLike, + ]: ... + + @typing.overload + def compute( + self, + image: UMat, + keypoints: typing.Sequence[KeyPoint], + descriptors: UMat | None = ..., + ) -> tuple[ + typing.Sequence[KeyPoint], + UMat, + ]: ... + + @typing.overload + def compute( + self, + images: typing.Sequence[cv2.typing.MatLike], + keypoints: typing.Sequence[typing.Sequence[KeyPoint]], + descriptors: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> tuple[ + typing.Sequence[typing.Sequence[KeyPoint]], + typing.Sequence[cv2.typing.MatLike], + ]: ... + + @typing.overload + def compute( + self, + images: typing.Sequence[UMat], + keypoints: typing.Sequence[typing.Sequence[KeyPoint]], + descriptors: typing.Sequence[UMat] | None = ..., + ) -> tuple[ + typing.Sequence[typing.Sequence[KeyPoint]], + typing.Sequence[UMat], + ]: ... + + @typing.overload + def detectAndCompute( + self, + image: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + descriptors: cv2.typing.MatLike | None = ..., + useProvidedKeypoints: bool = ..., + ) -> tuple[ + typing.Sequence[KeyPoint], + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detectAndCompute( + self, + image: UMat, + mask: UMat, + descriptors: UMat | None = ..., + useProvidedKeypoints: bool = ..., + ) -> tuple[ + typing.Sequence[KeyPoint], + UMat, + ]: ... + + def descriptorSize(self) -> int: ... + + def descriptorType(self) -> int: ... + + def defaultNorm(self) -> int: ... + + @typing.overload + def write(self, fileName: str) -> None: ... + @typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + @typing.overload + def read(self, fileName: str) -> None: ... + @typing.overload + def read(self, arg1: FileNode) -> None: ... + + def empty(self) -> bool: ... + + def getDefaultName(self) -> str: ... + + +class BOWTrainer: + # Functions + def add(self, descriptors: cv2.typing.MatLike) -> None: ... + + def getDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ... + + def descriptorsCount(self) -> int: ... + + def clear(self) -> None: ... + + @typing.overload + def cluster(self) -> cv2.typing.MatLike: ... + @typing.overload + def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... 
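The Subdiv2D and TickMeter declarations above map directly onto a short call sequence. A minimal usage sketch follows; it is illustrative only and not part of the stub file or this diff, and the bounding rectangle, sample points, and printed message are made-up values:

import cv2

# Delaunay triangulation over a few hand-picked points, timed with TickMeter.
timer = cv2.TickMeter()
timer.start()

subdiv = cv2.Subdiv2D((0, 0, 100, 100))  # bounding rect as (x, y, width, height)
for point in ((10.0, 10.0), (90.0, 15.0), (50.0, 80.0), (30.0, 40.0)):
    subdiv.insert(point)  # Point2f overload; points must lie inside the rect
triangles = subdiv.getTriangleList()  # each entry is (x1, y1, x2, y2, x3, y3)

timer.stop()
print(f"{len(triangles)} triangles in {timer.getTimeMilli():.3f} ms")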
+ + +class BOWImgDescriptorExtractor: + # Functions + def __init__(self, dextractor: cv2.typing.DescriptorExtractor, dmatcher: DescriptorMatcher) -> None: ... + + def setVocabulary(self, vocabulary: cv2.typing.MatLike) -> None: ... + + def getVocabulary(self) -> cv2.typing.MatLike: ... + + def compute( + self, + image: cv2.typing.MatLike, + keypoints: typing.Sequence[KeyPoint], + imgDescriptor: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + def descriptorSize(self) -> int: ... + + def descriptorType(self) -> int: ... + + +class VideoCapture: + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, filename: str, apiPreference: int = ...) -> None: ... + @typing.overload + def __init__(self, filename: str, apiPreference: int, params: typing.Sequence[int]) -> None: ... + @typing.overload + def __init__(self, index: int, apiPreference: int = ...) -> None: ... + @typing.overload + def __init__(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> None: ... + + @typing.overload + def open(self, filename: str, apiPreference: int = ...) -> bool: ... + @typing.overload + def open(self, filename: str, apiPreference: int, params: typing.Sequence[int]) -> bool: ... + @typing.overload + def open(self, index: int, apiPreference: int = ...) -> bool: ... + @typing.overload + def open(self, index: int, apiPreference: int, params: typing.Sequence[int]) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + def grab(self) -> bool: ... + + @typing.overload + def retrieve(self, image: cv2.typing.MatLike | None = ..., flag: int = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @typing.overload + def retrieve(self, image: UMat | None = ..., flag: int = ...) -> tuple[bool, UMat]: ... + + @typing.overload + def read(self, image: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ... + @typing.overload + def read(self, image: UMat | None = ...) -> tuple[bool, UMat]: ... + + def set(self, propId: int, value: float) -> bool: ... + + def get(self, propId: int) -> float: ... + + def getBackendName(self) -> str: ... + + def setExceptionMode(self, enable: bool) -> None: ... + + def getExceptionMode(self) -> bool: ... + + @staticmethod + def waitAny(streams: typing.Sequence[VideoCapture], timeoutNs: int = ...) -> tuple[bool, typing.Sequence[int]]: ... + + +class VideoWriter: + # Functions + @typing.overload + def __init__(self) -> None: ... + + @typing.overload + def __init__( + self, + filename: str, + fourcc: int, + fps: float, + frameSize: cv2.typing.Size, + isColor: bool = ..., + ) -> None: ... + + @typing.overload + def __init__( + self, + filename: str, + apiPreference: int, + fourcc: int, + fps: float, + frameSize: cv2.typing.Size, + isColor: bool = ..., + ) -> None: ... + + @typing.overload + def __init__( + self, + filename: str, + fourcc: int, + fps: float, + frameSize: cv2.typing.Size, + params: typing.Sequence[int], + ) -> None: ... + + @typing.overload + def __init__( + self, filename: str, apiPreference: int, fourcc: int, fps: float, + frameSize: cv2.typing.Size, params: typing.Sequence[int], + ) -> None: ... + + @typing.overload + def open(self, filename: str, fourcc: int, fps: float, frameSize: cv2.typing.Size, isColor: bool = ...) -> bool: ... + + @typing.overload + def open( + self, filename: str, apiPreference: int, fourcc: int, fps: float, + frameSize: cv2.typing.Size, isColor: bool = ..., + ) -> bool: ... 
+ + @typing.overload + def open( + self, + filename: str, + fourcc: int, + fps: float, + frameSize: cv2.typing.Size, + params: typing.Sequence[int], + ) -> bool: ... + + @typing.overload + def open( + self, filename: str, apiPreference: int, fourcc: int, fps: float, + frameSize: cv2.typing.Size, params: typing.Sequence[int], + ) -> bool: ... + + def isOpened(self) -> bool: ... + + def release(self) -> None: ... + + @typing.overload + def write(self, image: cv2.typing.MatLike) -> None: ... + @typing.overload + def write(self, image: UMat) -> None: ... + + def set(self, propId: int, value: float) -> bool: ... + + def get(self, propId: int) -> float: ... + + @staticmethod + def fourcc(c1: str, c2: str, c3: str, c4: str) -> int: ... + + def getBackendName(self) -> str: ... + + +class UsacParams: + confidence: float + isParallel: bool + loIterations: int + loMethod: LocalOptimMethod + loSampleSize: int + maxIterations: int + neighborsSearch: NeighborSearchMethod + randomGeneratorState: int + sampler: SamplingMethod + score: ScoreMethod + threshold: float + + # Functions + def __init__(self) -> None: ... + + +class CirclesGridFinderParameters: + densityNeighborhoodSize: cv2.typing.Size2f + minDensity: float + kmeansAttempts: int + minDistanceToAddKeypoint: int + keypointScale: int + minGraphConfidence: float + vertexGain: float + vertexPenalty: float + existingVertexGain: float + edgeGain: float + edgePenalty: float + convexHullFactor: float + minRNGEdgeSwitchDist: float + squareSize: float + maxRectifiedDistance: float + + # Functions + def __init__(self) -> None: ... + + +class CascadeClassifier: + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, filename: str) -> None: ... + + def empty(self) -> bool: ... + + def load(self, filename: str) -> bool: ... + + def read(self, node: FileNode) -> bool: ... + + @typing.overload + def detectMultiScale( + self, + image: cv2.typing.MatLike, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + ) -> typing.Sequence[cv2.typing.Rect]: ... + + @typing.overload + def detectMultiScale( + self, + image: UMat, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + ) -> typing.Sequence[cv2.typing.Rect]: ... + + @typing.overload + def detectMultiScale2( + self, + image: cv2.typing.MatLike, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[int], + ]: ... + + @typing.overload + def detectMultiScale2( + self, + image: UMat, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[int], + ]: ... + + @typing.overload + def detectMultiScale3( + self, + image: cv2.typing.MatLike, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + outputRejectLevels: bool = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[int], + typing.Sequence[float], + ]: ... 
+ + @typing.overload + def detectMultiScale3( + self, + image: UMat, + scaleFactor: float = ..., + minNeighbors: int = ..., + flags: int = ..., + minSize: cv2.typing.Size = ..., + maxSize: cv2.typing.Size = ..., + outputRejectLevels: bool = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[int], + typing.Sequence[float], + ]: ... + + def isOldFormatCascade(self) -> bool: ... + + def getOriginalWindowSize(self) -> cv2.typing.Size: ... + + def getFeatureType(self) -> int: ... + + @staticmethod + def convert(oldcascade: str, newcascade: str) -> bool: ... + + +class HOGDescriptor: + @property + def winSize(self) -> cv2.typing.Size: ... + @property + def blockSize(self) -> cv2.typing.Size: ... + @property + def blockStride(self) -> cv2.typing.Size: ... + @property + def cellSize(self) -> cv2.typing.Size: ... + @property + def nbins(self) -> int: ... + @property + def derivAperture(self) -> int: ... + @property + def winSigma(self) -> float: ... + @property + def histogramNormType(self) -> HOGDescriptor_HistogramNormType: ... + @property + def L2HysThreshold(self) -> float: ... + @property + def gammaCorrection(self) -> bool: ... + @property + def svmDetector(self) -> typing.Sequence[float]: ... + @property + def nlevels(self) -> int: ... + @property + def signedGradient(self) -> bool: ... + + # Functions + @typing.overload + def __init__(self) -> None: ... + + @typing.overload + def __init__( + self, + _winSize: cv2.typing.Size, + _blockSize: cv2.typing.Size, + _blockStride: cv2.typing.Size, + _cellSize: cv2.typing.Size, + _nbins: int, + _derivAperture: int = ..., + _winSigma: float = ..., + _histogramNormType: HOGDescriptor_HistogramNormType = ..., + _L2HysThreshold: float = ..., + _gammaCorrection: bool = ..., + _nlevels: int = ..., + _signedGradient: bool = ..., + ) -> None: ... + + @typing.overload + def __init__(self, filename: str) -> None: ... + + def getDescriptorSize(self) -> int: ... + + def checkDetectorSize(self) -> bool: ... + + def getWinSigma(self) -> float: ... + + @typing.overload + def setSVMDetector(self, svmdetector: cv2.typing.MatLike) -> None: ... + @typing.overload + def setSVMDetector(self, svmdetector: UMat) -> None: ... + + def load(self, filename: str, objname: str = ...) -> bool: ... + + def save(self, filename: str, objname: str = ...) -> None: ... + + @typing.overload + def compute( + self, img: cv2.typing.MatLike, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., + locations: typing.Sequence[cv2.typing.Point] = ..., + ) -> typing.Sequence[float]: ... + + @typing.overload + def compute( + self, img: UMat, winStride: cv2.typing.Size = ..., padding: cv2.typing.Size = ..., + locations: typing.Sequence[cv2.typing.Point] = ..., + ) -> typing.Sequence[float]: ... + + @typing.overload + def detect( + self, + img: cv2.typing.MatLike, + hitThreshold: float = ..., + winStride: cv2.typing.Size = ..., + padding: cv2.typing.Size = ..., + searchLocations: typing.Sequence[cv2.typing.Point] = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Point], + typing.Sequence[float], + ]: ... + + @typing.overload + def detect( + self, + img: UMat, + hitThreshold: float = ..., + winStride: cv2.typing.Size = ..., + padding: cv2.typing.Size = ..., + searchLocations: typing.Sequence[cv2.typing.Point] = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Point], + typing.Sequence[float], + ]: ... 
+ + @typing.overload + def detectMultiScale( + self, + img: cv2.typing.MatLike, + hitThreshold: float = ..., + winStride: cv2.typing.Size = ..., + padding: cv2.typing.Size = ..., + scale: float = ..., + groupThreshold: float = ..., + useMeanshiftGrouping: bool = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[float], + ]: ... + + @typing.overload + def detectMultiScale( + self, + img: UMat, + hitThreshold: float = ..., + winStride: cv2.typing.Size = ..., + padding: cv2.typing.Size = ..., + scale: float = ..., + groupThreshold: float = ..., + useMeanshiftGrouping: bool = ..., + ) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[float], + ]: ... + + @typing.overload + def computeGradient( + self, + img: cv2.typing.MatLike, + grad: cv2.typing.MatLike, + angleOfs: cv2.typing.MatLike, + paddingTL: cv2.typing.Size = ..., + paddingBR: cv2.typing.Size = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def computeGradient( + self, + img: UMat, + grad: UMat, + angleOfs: UMat, + paddingTL: cv2.typing.Size = ..., + paddingBR: cv2.typing.Size = ..., + ) -> tuple[ + UMat, + UMat, + ]: ... + + @staticmethod + def getDefaultPeopleDetector() -> typing.Sequence[float]: ... + + @staticmethod + def getDaimlerPeopleDetector() -> typing.Sequence[float]: ... + + +class QRCodeEncoder: + # Classes + class Params: + version: int + correction_level: QRCodeEncoder_CorrectionLevel + mode: QRCodeEncoder_EncodeMode + structure_number: int + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: QRCodeEncoder.Params = ...) -> QRCodeEncoder: ... + + @typing.overload + def encode(self, encoded_info: str, qrcode: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @typing.overload + def encode(self, encoded_info: str, qrcode: UMat | None = ...) -> UMat: ... + + @typing.overload + def encodeStructuredAppend( + self, encoded_info: str, qrcodes: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> typing.Sequence[cv2.typing.MatLike]: ... + + @typing.overload + def encodeStructuredAppend( + self, + encoded_info: str, + qrcodes: typing.Sequence[UMat] | None = ..., + ) -> typing.Sequence[UMat]: ... + + +class QRCodeDetector: + # Functions + def __init__(self) -> None: ... + + def setEpsX(self, epsX: float) -> None: ... + + def setEpsY(self, epsY: float) -> None: ... + + def setUseAlignmentMarkers(self, useAlignmentMarkers: bool) -> None: ... + + @typing.overload + def detect( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + ) -> tuple[ + bool, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detect(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... + + @typing.overload + def decode( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike, + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def decode(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... + + @typing.overload + def decodeCurved( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike, + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def decodeCurved(self, img: UMat, points: UMat, straight_qrcode: UMat | None = ...) -> tuple[str, UMat]: ... 
+ + @typing.overload + def detectAndDecode( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detectAndDecode( + self, + img: UMat, + points: UMat | None = ..., + straight_qrcode: UMat | None = ..., + ) -> tuple[ + str, + UMat, + UMat, + ]: ... + + @typing.overload + def detectAndDecodeCurved( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + straight_qrcode: cv2.typing.MatLike | None = ..., + ) -> tuple[ + str, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detectAndDecodeCurved( + self, + img: UMat, + points: UMat | None = ..., + straight_qrcode: UMat | None = ..., + ) -> tuple[ + str, + UMat, + UMat, + ]: ... + + @typing.overload + def detectMulti( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + ) -> tuple[ + bool, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detectMulti(self, img: UMat, points: UMat | None = ...) -> tuple[bool, UMat]: ... + + @typing.overload + def decodeMulti( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike, + straight_qrcode: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> tuple[ + bool, + typing.Sequence[str], + typing.Sequence[cv2.typing.MatLike], + ]: ... + + @typing.overload + def decodeMulti( + self, + img: UMat, + points: UMat, + straight_qrcode: typing.Sequence[UMat] | None = ..., + ) -> tuple[ + bool, + typing.Sequence[str], + typing.Sequence[UMat], + ]: ... + + @typing.overload + def detectAndDecodeMulti( + self, + img: cv2.typing.MatLike, + points: cv2.typing.MatLike | None = ..., + straight_qrcode: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> tuple[ + bool, + typing.Sequence[str], + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + ]: ... + + @typing.overload + def detectAndDecodeMulti( + self, + img: UMat, + points: UMat | None = ..., + straight_qrcode: typing.Sequence[UMat] | None = ..., + ) -> tuple[ + bool, + typing.Sequence[str], + UMat, + typing.Sequence[UMat], + ]: ... + + +class FaceDetectorYN: + # Functions + def setInputSize(self, input_size: cv2.typing.Size) -> None: ... + + def getInputSize(self) -> cv2.typing.Size: ... + + def setScoreThreshold(self, score_threshold: float) -> None: ... + + def getScoreThreshold(self) -> float: ... + + def setNMSThreshold(self, nms_threshold: float) -> None: ... + + def getNMSThreshold(self) -> float: ... + + def setTopK(self, top_k: int) -> None: ... + + def getTopK(self) -> int: ... + + @typing.overload + def detect( + self, + image: cv2.typing.MatLike, + faces: cv2.typing.MatLike | None = ..., + ) -> tuple[ + int, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detect(self, image: UMat, faces: UMat | None = ...) -> tuple[int, UMat]: ... + + @classmethod + def create( + cls, + model: str, + config: str, + input_size: cv2.typing.Size, + score_threshold: float = ..., + nms_threshold: float = ..., + top_k: int = ..., + backend_id: int = ..., + target_id: int = ..., + ) -> FaceDetectorYN: ... + + +class FaceRecognizerSF: + # Functions + @typing.overload + def alignCrop( + self, src_img: cv2.typing.MatLike, face_box: cv2.typing.MatLike, + aligned_img: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def alignCrop(self, src_img: UMat, face_box: UMat, aligned_img: UMat | None = ...) -> UMat: ... 
+ + @typing.overload + def feature( + self, aligned_img: cv2.typing.MatLike, + face_feature: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def feature(self, aligned_img: UMat, face_feature: UMat | None = ...) -> UMat: ... + + @typing.overload + def match( + self, + face_feature1: cv2.typing.MatLike, + face_feature2: cv2.typing.MatLike, + dis_type: int = ..., + ) -> float: ... + + @typing.overload + def match(self, face_feature1: UMat, face_feature2: UMat, dis_type: int = ...) -> float: ... + + @classmethod + def create(cls, model: str, config: str, backend_id: int = ..., target_id: int = ...) -> FaceRecognizerSF: ... + + +class Stitcher: + # Functions + @classmethod + def create(cls, mode: Stitcher_Mode = ...) -> Stitcher: ... + + def registrationResol(self) -> float: ... + + def setRegistrationResol(self, resol_mpx: float) -> None: ... + + def seamEstimationResol(self) -> float: ... + + def setSeamEstimationResol(self, resol_mpx: float) -> None: ... + + def compositingResol(self) -> float: ... + + def setCompositingResol(self, resol_mpx: float) -> None: ... + + def panoConfidenceThresh(self) -> float: ... + + def setPanoConfidenceThresh(self, conf_thresh: float) -> None: ... + + def waveCorrection(self) -> bool: ... + + def setWaveCorrection(self, flag: bool) -> None: ... + + def interpolationFlags(self) -> InterpolationFlags: ... + + def setInterpolationFlags(self, interp_flags: InterpolationFlags) -> None: ... + + @typing.overload + def estimateTransform( + self, + images: typing.Sequence[cv2.typing.MatLike], + masks: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> Stitcher_Status: ... + + @typing.overload + def estimateTransform( + self, + images: typing.Sequence[UMat], + masks: typing.Sequence[UMat] | None = ..., + ) -> Stitcher_Status: ... + + @typing.overload + def composePanorama(self, pano: cv2.typing.MatLike | None = ...) -> tuple[Stitcher_Status, cv2.typing.MatLike]: ... + @typing.overload + def composePanorama(self, pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + + @typing.overload + def composePanorama( + self, + images: typing.Sequence[cv2.typing.MatLike], + pano: cv2.typing.MatLike | None = ..., + ) -> tuple[ + Stitcher_Status, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def composePanorama( + self, + images: typing.Sequence[UMat], + pano: UMat | None = ..., + ) -> tuple[ + Stitcher_Status, + UMat, + ]: ... + + @typing.overload + def stitch( + self, + images: typing.Sequence[cv2.typing.MatLike], + pano: cv2.typing.MatLike | None = ..., + ) -> tuple[ + Stitcher_Status, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def stitch(self, images: typing.Sequence[UMat], pano: UMat | None = ...) -> tuple[Stitcher_Status, UMat]: ... + + @typing.overload + def stitch( + self, + images: typing.Sequence[cv2.typing.MatLike], + masks: typing.Sequence[cv2.typing.MatLike], + pano: cv2.typing.MatLike | None = ..., + ) -> tuple[ + Stitcher_Status, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def stitch( + self, + images: typing.Sequence[UMat], + masks: typing.Sequence[UMat], + pano: UMat | None = ..., + ) -> tuple[ + Stitcher_Status, + UMat, + ]: ... + + def workScale(self) -> float: ... + + +class PyRotationWarper: + # Functions + @typing.overload + def __init__(self, type: str, scale: float) -> None: ... + @typing.overload + def __init__(self) -> None: ... + + @typing.overload + def warpPoint(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ... 
+ @typing.overload + def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + + @typing.overload + def warpPointBackward( + self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, + R: cv2.typing.MatLike, + ) -> cv2.typing.Point2f: ... + + @typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + + @typing.overload + def warpPointBackward( + self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, + R: cv2.typing.MatLike, + ) -> cv2.typing.Point2f: ... + + @typing.overload + def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ... + + @typing.overload + def buildMaps( + self, + src_size: cv2.typing.Size, + K: cv2.typing.MatLike, + R: cv2.typing.MatLike, + xmap: cv2.typing.MatLike | None = ..., + ymap: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.Rect, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def buildMaps( + self, + src_size: cv2.typing.Size, + K: UMat, + R: UMat, + xmap: UMat | None = ..., + ymap: UMat | None = ..., + ) -> tuple[ + cv2.typing.Rect, + UMat, + UMat, + ]: ... + + @typing.overload + def warp( + self, + src: cv2.typing.MatLike, + K: cv2.typing.MatLike, + R: cv2.typing.MatLike, + interp_mode: int, + border_mode: int, + dst: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.Point, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def warp( + self, + src: UMat, + K: UMat, + R: UMat, + interp_mode: int, + border_mode: int, + dst: UMat | None = ..., + ) -> tuple[ + cv2.typing.Point, + UMat, + ]: ... + + @typing.overload + def warpBackward( + self, + src: cv2.typing.MatLike, + K: cv2.typing.MatLike, + R: cv2.typing.MatLike, + interp_mode: int, + border_mode: int, + dst_size: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def warpBackward( + self, + src: UMat, + K: UMat, + R: UMat, + interp_mode: int, + border_mode: int, + dst_size: cv2.typing.Size, + dst: UMat | None = ..., + ) -> UMat: ... + + @typing.overload + def warpRoi(self, src_size: cv2.typing.Size, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Rect: ... + @typing.overload + def warpRoi(self, src_size: cv2.typing.Size, K: UMat, R: UMat) -> cv2.typing.Rect: ... + + def getScale(self) -> float: ... + + def setScale(self, arg1: float) -> None: ... + + +class WarperCreator: + ... + + +class KalmanFilter: + statePre: cv2.typing.MatLike + statePost: cv2.typing.MatLike + transitionMatrix: cv2.typing.MatLike + controlMatrix: cv2.typing.MatLike + measurementMatrix: cv2.typing.MatLike + processNoiseCov: cv2.typing.MatLike + measurementNoiseCov: cv2.typing.MatLike + errorCovPre: cv2.typing.MatLike + gain: cv2.typing.MatLike + errorCovPost: cv2.typing.MatLike + + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, dynamParams: int, measureParams: int, controlParams: int = ..., type: int = ...) -> None: ... + + def predict(self, control: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + + def correct(self, measurement: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + +class Tracker: + # Functions + @typing.overload + def init(self, image: cv2.typing.MatLike, boundingBox: cv2.typing.Rect) -> None: ... + @typing.overload + def init(self, image: UMat, boundingBox: cv2.typing.Rect) -> None: ... + + @typing.overload + def update(self, image: cv2.typing.MatLike) -> tuple[bool, cv2.typing.Rect]: ... 
+ @typing.overload + def update(self, image: UMat) -> tuple[bool, cv2.typing.Rect]: ... + + +class GArrayDesc: + ... + + +class GComputation: + # Functions + @typing.overload + def __init__(self, ins: cv2.typing.GProtoInputArgs, outs: cv2.typing.GProtoOutputArgs) -> None: ... + @typing.overload + def __init__(self, in_: GMat, out: GMat) -> None: ... + @typing.overload + def __init__(self, in_: GMat, out: GScalar) -> None: ... + @typing.overload + def __init__(self, in1: GMat, in2: GMat, out: GMat) -> None: ... + + def apply( + self, callback: cv2.typing.ExtractArgsCallback, + args: typing.Sequence[GCompileArg] = ..., + ) -> typing.Sequence[cv2.typing.GRunArg]: ... + + @typing.overload + def compileStreaming( + self, + in_metas: typing.Sequence[cv2.typing.GMetaArg], + args: typing.Sequence[GCompileArg] = ..., + ) -> GStreamingCompiled: ... + + @typing.overload + def compileStreaming(self, args: typing.Sequence[GCompileArg] = ...) -> GStreamingCompiled: ... + + @typing.overload + def compileStreaming( + self, callback: cv2.typing.ExtractMetaCallback, + args: typing.Sequence[GCompileArg] = ..., + ) -> GStreamingCompiled: ... + + +class GFrame: + # Functions + def __init__(self) -> None: ... + + +class GKernelPackage: + # Functions + def size(self) -> int: ... + + +class GMat: + # Functions + def __init__(self) -> None: ... + + +class GMatDesc: + @property + def depth(self) -> int: ... + @property + def chan(self) -> int: ... + @property + def size(self) -> cv2.typing.Size: ... + @property + def planar(self) -> bool: ... + @property + def dims(self) -> typing.Sequence[int]: ... + + # Functions + @typing.overload + def __init__(self, d: int, c: int, s: cv2.typing.Size, p: bool = ...) -> None: ... + @typing.overload + def __init__(self, d: int, dd: typing.Sequence[int]) -> None: ... + @typing.overload + def __init__(self, d: int, dd: typing.Sequence[int]) -> None: ... + @typing.overload + def __init__(self) -> None: ... + + @typing.overload + def withSizeDelta(self, delta: cv2.typing.Size) -> GMatDesc: ... + @typing.overload + def withSizeDelta(self, dx: int, dy: int) -> GMatDesc: ... + + def withSize(self, sz: cv2.typing.Size) -> GMatDesc: ... + + def withDepth(self, ddepth: int) -> GMatDesc: ... + + def withType(self, ddepth: int, dchan: int) -> GMatDesc: ... + + @typing.overload + def asPlanar(self) -> GMatDesc: ... + @typing.overload + def asPlanar(self, planes: int) -> GMatDesc: ... + + def asInterleaved(self) -> GMatDesc: ... + + +class GOpaqueDesc: + ... + + +class GScalar: + # Functions + @typing.overload + def __init__(self) -> None: ... + @typing.overload + def __init__(self, s: cv2.typing.Scalar) -> None: ... + + +class GScalarDesc: + ... + + +class GStreamingCompiled: + # Functions + def __init__(self) -> None: ... + + def setSource(self, callback: cv2.typing.ExtractArgsCallback) -> None: ... + + def start(self) -> None: ... + + def pull(self) -> tuple[bool, typing.Sequence[cv2.typing.GRunArg] | typing.Sequence[cv2.typing.GOptRunArg]]: ... + + def stop(self) -> None: ... + + def running(self) -> bool: ... + + +class GOpaqueT: + # Functions + def __init__(self, type: cv2.gapi.ArgType) -> None: ... + + def type(self) -> cv2.gapi.ArgType: ... + + +class GArrayT: + # Functions + def __init__(self, type: cv2.gapi.ArgType) -> None: ... + + def type(self) -> cv2.gapi.ArgType: ... + + +class GCompileArg: + # Functions + @typing.overload + def __init__(self, arg: GKernelPackage) -> None: ... + @typing.overload + def __init__(self, arg: cv2.gapi.GNetPackage) -> None: ... 
+ @typing.overload + def __init__(self, arg: cv2.gapi.streaming.queue_capacity) -> None: ... + + +class GInferInputs: + # Functions + def __init__(self) -> None: ... + + @typing.overload + def setInput(self, name: str, value: GMat) -> GInferInputs: ... + @typing.overload + def setInput(self, name: str, value: GFrame) -> GInferInputs: ... + + +class GInferListInputs: + # Functions + def __init__(self) -> None: ... + + @typing.overload + def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... + @typing.overload + def setInput(self, name: str, value: GArrayT) -> GInferListInputs: ... + + +class GInferOutputs: + # Functions + def __init__(self) -> None: ... + + def at(self, name: str) -> GMat: ... + + +class GInferListOutputs: + # Functions + def __init__(self) -> None: ... + + def at(self, name: str) -> GArrayT: ... + + +class GeneralizedHough(Algorithm): + # Functions + @typing.overload + def setTemplate(self, templ: cv2.typing.MatLike, templCenter: cv2.typing.Point = ...) -> None: ... + @typing.overload + def setTemplate(self, templ: UMat, templCenter: cv2.typing.Point = ...) -> None: ... + + @typing.overload + def setTemplate( + self, + edges: cv2.typing.MatLike, + dx: cv2.typing.MatLike, + dy: cv2.typing.MatLike, + templCenter: cv2.typing.Point = ..., + ) -> None: ... + + @typing.overload + def setTemplate(self, edges: UMat, dx: UMat, dy: UMat, templCenter: cv2.typing.Point = ...) -> None: ... + + @typing.overload + def detect( + self, + image: cv2.typing.MatLike, + positions: cv2.typing.MatLike | None = ..., + votes: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detect(self, image: UMat, positions: UMat | None = ..., votes: UMat | None = ...) -> tuple[UMat, UMat]: ... + + @typing.overload + def detect( + self, + edges: cv2.typing.MatLike, + dx: cv2.typing.MatLike, + dy: cv2.typing.MatLike, + positions: cv2.typing.MatLike | None = ..., + votes: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detect( + self, + edges: UMat, + dx: UMat, + dy: UMat, + positions: UMat | None = ..., + votes: UMat | None = ..., + ) -> tuple[ + UMat, + UMat, + ]: ... + + def setCannyLowThresh(self, cannyLowThresh: int) -> None: ... + + def getCannyLowThresh(self) -> int: ... + + def setCannyHighThresh(self, cannyHighThresh: int) -> None: ... + + def getCannyHighThresh(self) -> int: ... + + def setMinDist(self, minDist: float) -> None: ... + + def getMinDist(self) -> float: ... + + def setDp(self, dp: float) -> None: ... + + def getDp(self) -> float: ... + + def setMaxBufferSize(self, maxBufferSize: int) -> None: ... + + def getMaxBufferSize(self) -> int: ... + + +class CLAHE(Algorithm): + # Functions + @typing.overload + def apply(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @typing.overload + def apply(self, src: UMat, dst: UMat | None = ...) -> UMat: ... + + def setClipLimit(self, clipLimit: float) -> None: ... + + def getClipLimit(self) -> float: ... + + def setTilesGridSize(self, tileGridSize: cv2.typing.Size) -> None: ... + + def getTilesGridSize(self) -> cv2.typing.Size: ... + + def collectGarbage(self) -> None: ... 
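The VideoCapture and VideoWriter signatures declared earlier are easiest to read next to a typical grab-and-write loop. A minimal sketch, assuming placeholder file names and relying on the module-level CAP_PROP_* constants and the "MJPG" fourcc, which come from elsewhere in cv2 rather than from the declarations above:

import cv2

# Copy every frame of a video file into a new file.
capture = cv2.VideoCapture("input.mp4")  # placeholder path
if not capture.isOpened():
    raise RuntimeError("Could not open input.mp4")

fps = capture.get(cv2.CAP_PROP_FPS)
size = (int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
writer = cv2.VideoWriter("output.avi", cv2.VideoWriter.fourcc(*"MJPG"), fps, size)

while True:
    ok, frame = capture.read()  # returns (bool, MatLike)
    if not ok:
        break
    writer.write(frame)

capture.release()
writer.release()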
+ + +class LineSegmentDetector(Algorithm): + # Functions + @typing.overload + def detect( + self, + image: cv2.typing.MatLike, + lines: cv2.typing.MatLike | None = ..., + width: cv2.typing.MatLike | None = ..., + prec: cv2.typing.MatLike | None = ..., + nfa: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def detect( + self, + image: UMat, + lines: UMat | None = ..., + width: UMat | None = ..., + prec: UMat | None = ..., + nfa: UMat | None = ..., + ) -> tuple[ + UMat, + UMat, + UMat, + UMat, + ]: ... + + @typing.overload + def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + @typing.overload + def drawSegments(self, image: UMat, lines: UMat) -> UMat: ... + + @typing.overload + def compareSegments( + self, + size: cv2.typing.Size, + lines1: cv2.typing.MatLike, + lines2: cv2.typing.MatLike, + image: cv2.typing.MatLike | None = ..., + ) -> tuple[ + int, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def compareSegments( + self, + size: cv2.typing.Size, + lines1: UMat, + lines2: UMat, + image: UMat | None = ..., + ) -> tuple[ + int, + UMat, + ]: ... + + +class Tonemap(Algorithm): + # Functions + @typing.overload + def process(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @typing.overload + def process(self, src: UMat, dst: UMat | None = ...) -> UMat: ... + + def getGamma(self) -> float: ... + + def setGamma(self, gamma: float) -> None: ... + + +class AlignExposures(Algorithm): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + dst: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + ) -> None: ... + + @typing.overload + def process( + self, + src: typing.Sequence[UMat], + dst: typing.Sequence[cv2.typing.MatLike], + times: UMat, + response: UMat, + ) -> None: ... + + +class CalibrateCRF(Algorithm): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class MergeExposures(Algorithm): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + + +class AffineFeature(Feature2D): + # Functions + @classmethod + def create( + cls, backend: Feature2D, maxTilt: int = ..., minTilt: int = ..., + tiltStep: float = ..., rotateStepBase: float = ..., + ) -> AffineFeature: ... + + def setViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ... + + def getViewParams(self, tilts: typing.Sequence[float], rolls: typing.Sequence[float]) -> None: ... + + def getDefaultName(self) -> str: ... + + +class SIFT(Feature2D): + # Functions + @classmethod + @typing.overload + def create( + cls, + nfeatures: int = ..., + nOctaveLayers: int = ..., + contrastThreshold: float = ..., + edgeThreshold: float = ..., + sigma: float = ..., + enable_precise_upscale: bool = ..., + ) -> SIFT: ... 
+ + @classmethod + @typing.overload + def create( + cls, nfeatures: int, nOctaveLayers: int, contrastThreshold: float, edgeThreshold: float, + sigma: float, descriptorType: int, enable_precise_upscale: bool = ..., + ) -> SIFT: ... + + def getDefaultName(self) -> str: ... + + def setNFeatures(self, maxFeatures: int) -> None: ... + + def getNFeatures(self) -> int: ... + + def setNOctaveLayers(self, nOctaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setContrastThreshold(self, contrastThreshold: float) -> None: ... + + def getContrastThreshold(self) -> float: ... + + def setEdgeThreshold(self, edgeThreshold: float) -> None: ... + + def getEdgeThreshold(self) -> float: ... + + def setSigma(self, sigma: float) -> None: ... + + def getSigma(self) -> float: ... + + +class BRISK(Feature2D): + # Functions + @classmethod + @typing.overload + def create(cls, thresh: int = ..., octaves: int = ..., patternScale: float = ...) -> BRISK: ... + + @classmethod + @typing.overload + def create( + cls, + radiusList: typing.Sequence[float], + numberList: typing.Sequence[int], + dMax: float = ..., + dMin: float = ..., + indexChange: typing.Sequence[int] = ..., + ) -> BRISK: ... + + @classmethod + @typing.overload + def create( + cls, + thresh: int, + octaves: int, + radiusList: typing.Sequence[float], + numberList: typing.Sequence[int], + dMax: float = ..., + dMin: float = ..., + indexChange: typing.Sequence[int] = ..., + ) -> BRISK: ... + + def getDefaultName(self) -> str: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setOctaves(self, octaves: int) -> None: ... + + def getOctaves(self) -> int: ... + + def setPatternScale(self, patternScale: float) -> None: ... + + def getPatternScale(self) -> float: ... + + +class ORB(Feature2D): + # Functions + @classmethod + def create( + cls, + nfeatures: int = ..., + scaleFactor: float = ..., + nlevels: int = ..., + edgeThreshold: int = ..., + firstLevel: int = ..., + WTA_K: int = ..., + scoreType: ORB_ScoreType = ..., + patchSize: int = ..., + fastThreshold: int = ..., + ) -> ORB: ... + + def setMaxFeatures(self, maxFeatures: int) -> None: ... + + def getMaxFeatures(self) -> int: ... + + def setScaleFactor(self, scaleFactor: float) -> None: ... + + def getScaleFactor(self) -> float: ... + + def setNLevels(self, nlevels: int) -> None: ... + + def getNLevels(self) -> int: ... + + def setEdgeThreshold(self, edgeThreshold: int) -> None: ... + + def getEdgeThreshold(self) -> int: ... + + def setFirstLevel(self, firstLevel: int) -> None: ... + + def getFirstLevel(self) -> int: ... + + def setWTA_K(self, wta_k: int) -> None: ... + + def getWTA_K(self) -> int: ... + + def setScoreType(self, scoreType: ORB_ScoreType) -> None: ... + + def getScoreType(self) -> ORB_ScoreType: ... + + def setPatchSize(self, patchSize: int) -> None: ... + + def getPatchSize(self) -> int: ... + + def setFastThreshold(self, fastThreshold: int) -> None: ... + + def getFastThreshold(self) -> int: ... + + def getDefaultName(self) -> str: ... + + +class MSER(Feature2D): + # Functions + @classmethod + def create( + cls, + delta: int = ..., + min_area: int = ..., + max_area: int = ..., + max_variation: float = ..., + min_diversity: float = ..., + max_evolution: int = ..., + area_threshold: float = ..., + min_margin: float = ..., + edge_blur_size: int = ..., + ) -> MSER: ... 
+ + @typing.overload + def detectRegions( + self, + image: cv2.typing.MatLike, + ) -> tuple[ + typing.Sequence[typing.Sequence[cv2.typing.Point]], + typing.Sequence[cv2.typing.Rect], + ]: ... + + @typing.overload + def detectRegions( + self, + image: UMat, + ) -> tuple[ + typing.Sequence[typing.Sequence[cv2.typing.Point]], + typing.Sequence[cv2.typing.Rect], + ]: ... + + def setDelta(self, delta: int) -> None: ... + + def getDelta(self) -> int: ... + + def setMinArea(self, minArea: int) -> None: ... + + def getMinArea(self) -> int: ... + + def setMaxArea(self, maxArea: int) -> None: ... + + def getMaxArea(self) -> int: ... + + def setMaxVariation(self, maxVariation: float) -> None: ... + + def getMaxVariation(self) -> float: ... + + def setMinDiversity(self, minDiversity: float) -> None: ... + + def getMinDiversity(self) -> float: ... + + def setMaxEvolution(self, maxEvolution: int) -> None: ... + + def getMaxEvolution(self) -> int: ... + + def setAreaThreshold(self, areaThreshold: float) -> None: ... + + def getAreaThreshold(self) -> float: ... + + def setMinMargin(self, min_margin: float) -> None: ... + + def getMinMargin(self) -> float: ... + + def setEdgeBlurSize(self, edge_blur_size: int) -> None: ... + + def getEdgeBlurSize(self) -> int: ... + + def setPass2Only(self, f: bool) -> None: ... + + def getPass2Only(self) -> bool: ... + + def getDefaultName(self) -> str: ... + + +class FastFeatureDetector(Feature2D): + # Functions + @classmethod + def create( + cls, threshold: int = ..., nonmaxSuppression: bool = ..., + type: FastFeatureDetector_DetectorType = ..., + ) -> FastFeatureDetector: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setNonmaxSuppression(self, f: bool) -> None: ... + + def getNonmaxSuppression(self) -> bool: ... + + def setType(self, type: FastFeatureDetector_DetectorType) -> None: ... + + def getType(self) -> FastFeatureDetector_DetectorType: ... + + def getDefaultName(self) -> str: ... + + +class AgastFeatureDetector(Feature2D): + # Functions + @classmethod + def create( + cls, threshold: int = ..., nonmaxSuppression: bool = ..., + type: AgastFeatureDetector_DetectorType = ..., + ) -> AgastFeatureDetector: ... + + def setThreshold(self, threshold: int) -> None: ... + + def getThreshold(self) -> int: ... + + def setNonmaxSuppression(self, f: bool) -> None: ... + + def getNonmaxSuppression(self) -> bool: ... + + def setType(self, type: AgastFeatureDetector_DetectorType) -> None: ... + + def getType(self) -> AgastFeatureDetector_DetectorType: ... + + def getDefaultName(self) -> str: ... + + +class GFTTDetector(Feature2D): + # Functions + @classmethod + @typing.overload + def create( + cls, + maxCorners: int = ..., + qualityLevel: float = ..., + minDistance: float = ..., + blockSize: int = ..., + useHarrisDetector: bool = ..., + k: float = ..., + ) -> GFTTDetector: ... + + @classmethod + @typing.overload + def create( + cls, + maxCorners: int, + qualityLevel: float, + minDistance: float, + blockSize: int, + gradiantSize: int, + useHarrisDetector: bool = ..., + k: float = ..., + ) -> GFTTDetector: ... + + def setMaxFeatures(self, maxFeatures: int) -> None: ... + + def getMaxFeatures(self) -> int: ... + + def setQualityLevel(self, qlevel: float) -> None: ... + + def getQualityLevel(self) -> float: ... + + def setMinDistance(self, minDistance: float) -> None: ... + + def getMinDistance(self) -> float: ... + + def setBlockSize(self, blockSize: int) -> None: ... + + def getBlockSize(self) -> int: ... 
+ + def setGradientSize(self, gradientSize_: int) -> None: ... + + def getGradientSize(self) -> int: ... + + def setHarrisDetector(self, val: bool) -> None: ... + + def getHarrisDetector(self) -> bool: ... + + def setK(self, k: float) -> None: ... + + def getK(self) -> float: ... + + def getDefaultName(self) -> str: ... + + +class SimpleBlobDetector(Feature2D): + # Classes + class Params: + thresholdStep: float + minThreshold: float + maxThreshold: float + minRepeatability: int + minDistBetweenBlobs: float + filterByColor: bool + blobColor: int + filterByArea: bool + minArea: float + maxArea: float + filterByCircularity: bool + minCircularity: float + maxCircularity: float + filterByInertia: bool + minInertiaRatio: float + maxInertiaRatio: float + filterByConvexity: bool + minConvexity: float + maxConvexity: float + collectContours: bool + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: SimpleBlobDetector.Params = ...) -> SimpleBlobDetector: ... + + def setParams(self, params: SimpleBlobDetector.Params) -> None: ... + + def getParams(self) -> SimpleBlobDetector.Params: ... + + def getDefaultName(self) -> str: ... + + def getBlobContours(self) -> typing.Sequence[typing.Sequence[cv2.typing.Point]]: ... + + +class KAZE(Feature2D): + # Functions + @classmethod + def create( + cls, extended: bool = ..., upright: bool = ..., threshold: float = ..., nOctaves: int = ..., + nOctaveLayers: int = ..., diffusivity: KAZE_DiffusivityType = ..., + ) -> KAZE: ... + + def setExtended(self, extended: bool) -> None: ... + + def getExtended(self) -> bool: ... + + def setUpright(self, upright: bool) -> None: ... + + def getUpright(self) -> bool: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getThreshold(self) -> float: ... + + def setNOctaves(self, octaves: int) -> None: ... + + def getNOctaves(self) -> int: ... + + def setNOctaveLayers(self, octaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... + + def getDiffusivity(self) -> KAZE_DiffusivityType: ... + + def getDefaultName(self) -> str: ... + + +class AKAZE(Feature2D): + # Functions + @classmethod + def create( + cls, + descriptor_type: AKAZE_DescriptorType = ..., + descriptor_size: int = ..., + descriptor_channels: int = ..., + threshold: float = ..., + nOctaves: int = ..., + nOctaveLayers: int = ..., + diffusivity: KAZE_DiffusivityType = ..., + ) -> AKAZE: ... + + def setDescriptorType(self, dtype: AKAZE_DescriptorType) -> None: ... + + def getDescriptorType(self) -> AKAZE_DescriptorType: ... + + def setDescriptorSize(self, dsize: int) -> None: ... + + def getDescriptorSize(self) -> int: ... + + def setDescriptorChannels(self, dch: int) -> None: ... + + def getDescriptorChannels(self) -> int: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getThreshold(self) -> float: ... + + def setNOctaves(self, octaves: int) -> None: ... + + def getNOctaves(self) -> int: ... + + def setNOctaveLayers(self, octaveLayers: int) -> None: ... + + def getNOctaveLayers(self) -> int: ... + + def setDiffusivity(self, diff: KAZE_DiffusivityType) -> None: ... + + def getDiffusivity(self) -> KAZE_DiffusivityType: ... + + def getDefaultName(self) -> str: ... + + +class DescriptorMatcher(Algorithm): + # Functions + @typing.overload + def add(self, descriptors: typing.Sequence[cv2.typing.MatLike]) -> None: ... 
+ @typing.overload + def add(self, descriptors: typing.Sequence[UMat]) -> None: ... + + def getTrainDescriptors(self) -> typing.Sequence[cv2.typing.MatLike]: ... + + def clear(self) -> None: ... + + def empty(self) -> bool: ... + + def isMaskSupported(self) -> bool: ... + + def train(self) -> None: ... + + @typing.overload + def match( + self, queryDescriptors: cv2.typing.MatLike, trainDescriptors: cv2.typing.MatLike, + mask: cv2.typing.MatLike | None = ..., + ) -> typing.Sequence[DMatch]: ... + + @typing.overload + def match( + self, queryDescriptors: UMat, trainDescriptors: UMat, + mask: UMat | None = ..., + ) -> typing.Sequence[DMatch]: ... + + @typing.overload + def match( + self, queryDescriptors: cv2.typing.MatLike, + masks: typing.Sequence[cv2.typing.MatLike] | None = ..., + ) -> typing.Sequence[DMatch]: ... + + @typing.overload + def match(self, queryDescriptors: UMat, masks: typing.Sequence[UMat] | None = ...) -> typing.Sequence[DMatch]: ... + + @typing.overload + def knnMatch( + self, + queryDescriptors: cv2.typing.MatLike, + trainDescriptors: cv2.typing.MatLike, + k: int, + mask: cv2.typing.MatLike | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def knnMatch( + self, queryDescriptors: UMat, trainDescriptors: UMat, k: int, mask: UMat | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def knnMatch( + self, + queryDescriptors: cv2.typing.MatLike, + k: int, + masks: typing.Sequence[cv2.typing.MatLike] | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def knnMatch( + self, + queryDescriptors: UMat, + k: int, + masks: typing.Sequence[UMat] | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def radiusMatch( + self, + queryDescriptors: cv2.typing.MatLike, + trainDescriptors: cv2.typing.MatLike, + maxDistance: float, + mask: cv2.typing.MatLike | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def radiusMatch( + self, queryDescriptors: UMat, trainDescriptors: UMat, maxDistance: float, mask: UMat | + None = ..., compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def radiusMatch( + self, + queryDescriptors: cv2.typing.MatLike, + maxDistance: float, + masks: typing.Sequence[cv2.typing.MatLike] | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def radiusMatch( + self, + queryDescriptors: UMat, + maxDistance: float, + masks: typing.Sequence[UMat] | None = ..., + compactResult: bool = ..., + ) -> typing.Sequence[typing.Sequence[DMatch]]: ... + + @typing.overload + def write(self, fileName: str) -> None: ... + @typing.overload + def write(self, fs: FileStorage, name: str) -> None: ... + + @typing.overload + def read(self, fileName: str) -> None: ... + @typing.overload + def read(self, arg1: FileNode) -> None: ... + + def clone(self, emptyTrainData: bool = ...) -> DescriptorMatcher: ... + + @classmethod + @typing.overload + def create(cls, descriptorMatcherType: str) -> DescriptorMatcher: ... + @classmethod + @typing.overload + def create(cls, matcherType: DescriptorMatcher_MatcherType) -> DescriptorMatcher: ... 
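The DescriptorMatcher interface above is normally used together with the Feature2D subclasses (ORB, SIFT, ...) declared earlier. A minimal matching sketch, not part of the stub file; the image paths are placeholders, cv2.imread and IMREAD_GRAYSCALE come from elsewhere in the stubs, and "BruteForce-Hamming" is one of the standard matcher-type strings accepted by DescriptorMatcher.create:

import cv2

# Detect ORB keypoints in two images and brute-force match their descriptors.
img1 = cv2.imread("query.png", cv2.IMREAD_GRAYSCALE)  # placeholder paths
img2 = cv2.imread("train.png", cv2.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    raise FileNotFoundError("query.png / train.png not found")

orb = cv2.ORB.create(nfeatures=500)
# The stubs type the mask as MatLike, but passing None is accepted at runtime.
keypoints1, descriptors1 = orb.detectAndCompute(img1, None)
keypoints2, descriptors2 = orb.detectAndCompute(img2, None)

matcher = cv2.DescriptorMatcher.create("BruteForce-Hamming")
matches = sorted(matcher.match(descriptors1, descriptors2), key=lambda m: m.distance)
print(f"{len(matches)} matches, best distance: {matches[0].distance:.1f}" if matches else "no matches")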
+ + +class BOWKMeansTrainer(BOWTrainer): + # Functions + def __init__( + self, + clusterCount: int, + termcrit: cv2.typing.TermCriteria = ..., + attempts: int = ..., + flags: int = ..., + ) -> None: ... + + @typing.overload + def cluster(self) -> cv2.typing.MatLike: ... + @typing.overload + def cluster(self, descriptors: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + + +class StereoMatcher(Algorithm): + # Functions + @typing.overload + def compute( + self, left: cv2.typing.MatLike, right: cv2.typing.MatLike, + disparity: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def compute(self, left: UMat, right: UMat, disparity: UMat | None = ...) -> UMat: ... + + def getMinDisparity(self) -> int: ... + + def setMinDisparity(self, minDisparity: int) -> None: ... + + def getNumDisparities(self) -> int: ... + + def setNumDisparities(self, numDisparities: int) -> None: ... + + def getBlockSize(self) -> int: ... + + def setBlockSize(self, blockSize: int) -> None: ... + + def getSpeckleWindowSize(self) -> int: ... + + def setSpeckleWindowSize(self, speckleWindowSize: int) -> None: ... + + def getSpeckleRange(self) -> int: ... + + def setSpeckleRange(self, speckleRange: int) -> None: ... + + def getDisp12MaxDiff(self) -> int: ... + + def setDisp12MaxDiff(self, disp12MaxDiff: int) -> None: ... + + +class BaseCascadeClassifier(Algorithm): + ... + + +class BackgroundSubtractor(Algorithm): + # Functions + @typing.overload + def apply( + self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | + None = ..., learningRate: float = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... + + @typing.overload + def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... + @typing.overload + def getBackgroundImage(self, backgroundImage: UMat | None = ...) -> UMat: ... + + +class DenseOpticalFlow(Algorithm): + # Functions + @typing.overload + def calc(self, I0: cv2.typing.MatLike, I1: cv2.typing.MatLike, flow: cv2.typing.MatLike) -> cv2.typing.MatLike: ... + @typing.overload + def calc(self, I0: UMat, I1: UMat, flow: UMat) -> UMat: ... + + def collectGarbage(self) -> None: ... + + +class SparseOpticalFlow(Algorithm): + # Functions + @typing.overload + def calc( + self, + prevImg: cv2.typing.MatLike, + nextImg: cv2.typing.MatLike, + prevPts: cv2.typing.MatLike, + nextPts: cv2.typing.MatLike, + status: cv2.typing.MatLike | None = ..., + err: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def calc( + self, + prevImg: UMat, + nextImg: UMat, + prevPts: UMat, + nextPts: UMat, + status: UMat | None = ..., + err: UMat | None = ..., + ) -> tuple[ + UMat, + UMat, + UMat, + ]: ... + + +class TrackerMIL(Tracker): + # Classes + class Params: + samplerInitInRadius: float + samplerInitMaxNegNum: int + samplerSearchWinSize: float + samplerTrackInRadius: float + samplerTrackMaxPosNum: int + samplerTrackMaxNegNum: int + featureSetNumFeatures: int + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: TrackerMIL.Params = ...) -> TrackerMIL: ... + + +class TrackerGOTURN(Tracker): + # Classes + class Params: + modelTxt: str + modelBin: str + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: TrackerGOTURN.Params = ...) 
-> TrackerGOTURN: ... + + +class TrackerDaSiamRPN(Tracker): + # Classes + class Params: + model: str + kernel_cls1: str + kernel_r1: str + backend: int + target: int + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ... + + def getTrackingScore(self) -> float: ... + + +class TrackerNano(Tracker): + # Classes + class Params: + backbone: str + neckhead: str + backend: int + target: int + + # Functions + def __init__(self) -> None: ... + + # Functions + + @classmethod + def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ... + + def getTrackingScore(self) -> float: ... + + +class GeneralizedHoughBallard(GeneralizedHough): + # Functions + def setLevels(self, levels: int) -> None: ... + + def getLevels(self) -> int: ... + + def setVotesThreshold(self, votesThreshold: int) -> None: ... + + def getVotesThreshold(self) -> int: ... + + +class GeneralizedHoughGuil(GeneralizedHough): + # Functions + def setXi(self, xi: float) -> None: ... + + def getXi(self) -> float: ... + + def setLevels(self, levels: int) -> None: ... + + def getLevels(self) -> int: ... + + def setAngleEpsilon(self, angleEpsilon: float) -> None: ... + + def getAngleEpsilon(self) -> float: ... + + def setMinAngle(self, minAngle: float) -> None: ... + + def getMinAngle(self) -> float: ... + + def setMaxAngle(self, maxAngle: float) -> None: ... + + def getMaxAngle(self) -> float: ... + + def setAngleStep(self, angleStep: float) -> None: ... + + def getAngleStep(self) -> float: ... + + def setAngleThresh(self, angleThresh: int) -> None: ... + + def getAngleThresh(self) -> int: ... + + def setMinScale(self, minScale: float) -> None: ... + + def getMinScale(self) -> float: ... + + def setMaxScale(self, maxScale: float) -> None: ... + + def getMaxScale(self) -> float: ... + + def setScaleStep(self, scaleStep: float) -> None: ... + + def getScaleStep(self) -> float: ... + + def setScaleThresh(self, scaleThresh: int) -> None: ... + + def getScaleThresh(self) -> int: ... + + def setPosThresh(self, posThresh: int) -> None: ... + + def getPosThresh(self) -> int: ... + + +class TonemapDrago(Tonemap): + # Functions + def getSaturation(self) -> float: ... + + def setSaturation(self, saturation: float) -> None: ... + + def getBias(self) -> float: ... + + def setBias(self, bias: float) -> None: ... + + +class TonemapReinhard(Tonemap): + # Functions + def getIntensity(self) -> float: ... + + def setIntensity(self, intensity: float) -> None: ... + + def getLightAdaptation(self) -> float: ... + + def setLightAdaptation(self, light_adapt: float) -> None: ... + + def getColorAdaptation(self) -> float: ... + + def setColorAdaptation(self, color_adapt: float) -> None: ... + + +class TonemapMantiuk(Tonemap): + # Functions + def getScale(self) -> float: ... + + def setScale(self, scale: float) -> None: ... + + def getSaturation(self) -> float: ... + + def setSaturation(self, saturation: float) -> None: ... + + +class AlignMTB(AlignExposures): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + dst: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + ) -> None: ... + + @typing.overload + def process( + self, + src: typing.Sequence[UMat], + dst: typing.Sequence[cv2.typing.MatLike], + times: UMat, + response: UMat, + ) -> None: ... 
+ + @typing.overload + def process(self, src: typing.Sequence[cv2.typing.MatLike], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ... + @typing.overload + def process(self, src: typing.Sequence[UMat], dst: typing.Sequence[cv2.typing.MatLike]) -> None: ... + + @typing.overload + def calculateShift(self, img0: cv2.typing.MatLike, img1: cv2.typing.MatLike) -> cv2.typing.Point: ... + @typing.overload + def calculateShift(self, img0: UMat, img1: UMat) -> cv2.typing.Point: ... + + @typing.overload + def shiftMat( + self, src: cv2.typing.MatLike, shift: cv2.typing.Point, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def shiftMat(self, src: UMat, shift: cv2.typing.Point, dst: UMat | None = ...) -> UMat: ... + + @typing.overload + def computeBitmaps( + self, + img: cv2.typing.MatLike, + tb: cv2.typing.MatLike | None = ..., + eb: cv2.typing.MatLike | None = ..., + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... + + @typing.overload + def computeBitmaps(self, img: UMat, tb: UMat | None = ..., eb: UMat | None = ...) -> tuple[UMat, UMat]: ... + + def getMaxBits(self) -> int: ... + + def setMaxBits(self, max_bits: int) -> None: ... + + def getExcludeRange(self) -> int: ... + + def setExcludeRange(self, exclude_range: int) -> None: ... + + def getCut(self) -> bool: ... + + def setCut(self, value: bool) -> None: ... + + +class CalibrateDebevec(CalibrateCRF): + # Functions + def getLambda(self) -> float: ... + + def setLambda(self, lambda_: float) -> None: ... + + def getSamples(self) -> int: ... + + def setSamples(self, samples: int) -> None: ... + + def getRandom(self) -> bool: ... + + def setRandom(self, random: bool) -> None: ... + + +class CalibrateRobertson(CalibrateCRF): + # Functions + def getMaxIter(self) -> int: ... + + def setMaxIter(self, max_iter: int) -> None: ... + + def getThreshold(self) -> float: ... + + def setThreshold(self, threshold: float) -> None: ... + + def getRadiance(self) -> cv2.typing.MatLike: ... + + +class MergeDebevec(MergeExposures): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class MergeMertens(MergeExposures): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + + @typing.overload + def process( + self, src: typing.Sequence[cv2.typing.MatLike], + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + + def getContrastWeight(self) -> float: ... + + def setContrastWeight(self, contrast_weiht: float) -> None: ... 
+ + def getSaturationWeight(self) -> float: ... + + def setSaturationWeight(self, saturation_weight: float) -> None: ... + + def getExposureWeight(self) -> float: ... + + def setExposureWeight(self, exposure_weight: float) -> None: ... + + +class MergeRobertson(MergeExposures): + # Functions + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + response: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, response: UMat, dst: UMat | None = ...) -> UMat: ... + + @typing.overload + def process( + self, + src: typing.Sequence[cv2.typing.MatLike], + times: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def process(self, src: typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ... + + +class BFMatcher(DescriptorMatcher): + # Functions + def __init__(self, normType: int = ..., crossCheck: bool = ...) -> None: ... + + @classmethod + def create(cls, normType: int = ..., crossCheck: bool = ...) -> BFMatcher: ... + + +class FlannBasedMatcher(DescriptorMatcher): + # Functions + def __init__( + self, indexParams: cv2.typing.IndexParams = ..., + searchParams: cv2.typing.SearchParams = ..., + ) -> None: ... + + @classmethod + def create(cls) -> FlannBasedMatcher: ... + + +class StereoBM(StereoMatcher): + # Functions + def getPreFilterType(self) -> int: ... + + def setPreFilterType(self, preFilterType: int) -> None: ... + + def getPreFilterSize(self) -> int: ... + + def setPreFilterSize(self, preFilterSize: int) -> None: ... + + def getPreFilterCap(self) -> int: ... + + def setPreFilterCap(self, preFilterCap: int) -> None: ... + + def getTextureThreshold(self) -> int: ... + + def setTextureThreshold(self, textureThreshold: int) -> None: ... + + def getUniquenessRatio(self) -> int: ... + + def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... + + def getSmallerBlockSize(self) -> int: ... + + def setSmallerBlockSize(self, blockSize: int) -> None: ... + + def getROI1(self) -> cv2.typing.Rect: ... + + def setROI1(self, roi1: cv2.typing.Rect) -> None: ... + + def getROI2(self) -> cv2.typing.Rect: ... + + def setROI2(self, roi2: cv2.typing.Rect) -> None: ... + + @classmethod + def create(cls, numDisparities: int = ..., blockSize: int = ...) -> StereoBM: ... + + +class StereoSGBM(StereoMatcher): + # Functions + def getPreFilterCap(self) -> int: ... + + def setPreFilterCap(self, preFilterCap: int) -> None: ... + + def getUniquenessRatio(self) -> int: ... + + def setUniquenessRatio(self, uniquenessRatio: int) -> None: ... + + def getP1(self) -> int: ... + + def setP1(self, P1: int) -> None: ... + + def getP2(self) -> int: ... + + def setP2(self, P2: int) -> None: ... + + def getMode(self) -> int: ... + + def setMode(self, mode: int) -> None: ... + + @classmethod + def create( + cls, + minDisparity: int = ..., + numDisparities: int = ..., + blockSize: int = ..., + P1: int = ..., + P2: int = ..., + disp12MaxDiff: int = ..., + preFilterCap: int = ..., + uniquenessRatio: int = ..., + speckleWindowSize: int = ..., + speckleRange: int = ..., + mode: int = ..., + ) -> StereoSGBM: ... + + +class BackgroundSubtractorMOG2(BackgroundSubtractor): + # Functions + def getHistory(self) -> int: ... + + def setHistory(self, history: int) -> None: ... + + def getNMixtures(self) -> int: ... + + def setNMixtures(self, nmixtures: int) -> None: ... 
+ + def getBackgroundRatio(self) -> float: ... + + def setBackgroundRatio(self, ratio: float) -> None: ... + + def getVarThreshold(self) -> float: ... + + def setVarThreshold(self, varThreshold: float) -> None: ... + + def getVarThresholdGen(self) -> float: ... + + def setVarThresholdGen(self, varThresholdGen: float) -> None: ... + + def getVarInit(self) -> float: ... + + def setVarInit(self, varInit: float) -> None: ... + + def getVarMin(self) -> float: ... + + def setVarMin(self, varMin: float) -> None: ... + + def getVarMax(self) -> float: ... + + def setVarMax(self, varMax: float) -> None: ... + + def getComplexityReductionThreshold(self) -> float: ... + + def setComplexityReductionThreshold(self, ct: float) -> None: ... + + def getDetectShadows(self) -> bool: ... + + def setDetectShadows(self, detectShadows: bool) -> None: ... + + def getShadowValue(self) -> int: ... + + def setShadowValue(self, value: int) -> None: ... + + def getShadowThreshold(self) -> float: ... + + def setShadowThreshold(self, threshold: float) -> None: ... + + @typing.overload + def apply( + self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | + None = ..., learningRate: float = ..., + ) -> cv2.typing.MatLike: ... + + @typing.overload + def apply(self, image: UMat, fgmask: UMat | None = ..., learningRate: float = ...) -> UMat: ... + + +class BackgroundSubtractorKNN(BackgroundSubtractor): + # Functions + def getHistory(self) -> int: ... + + def setHistory(self, history: int) -> None: ... + + def getNSamples(self) -> int: ... + + def setNSamples(self, _nN: int) -> None: ... + + def getDist2Threshold(self) -> float: ... + + def setDist2Threshold(self, _dist2Threshold: float) -> None: ... + + def getkNNSamples(self) -> int: ... + + def setkNNSamples(self, _nkNN: int) -> None: ... + + def getDetectShadows(self) -> bool: ... + + def setDetectShadows(self, detectShadows: bool) -> None: ... + + def getShadowValue(self) -> int: ... + + def setShadowValue(self, value: int) -> None: ... + + def getShadowThreshold(self) -> float: ... + + def setShadowThreshold(self, threshold: float) -> None: ... + + +class FarnebackOpticalFlow(DenseOpticalFlow): + # Functions + def getNumLevels(self) -> int: ... + + def setNumLevels(self, numLevels: int) -> None: ... + + def getPyrScale(self) -> float: ... + + def setPyrScale(self, pyrScale: float) -> None: ... + + def getFastPyramids(self) -> bool: ... + + def setFastPyramids(self, fastPyramids: bool) -> None: ... + + def getWinSize(self) -> int: ... + + def setWinSize(self, winSize: int) -> None: ... + + def getNumIters(self) -> int: ... + + def setNumIters(self, numIters: int) -> None: ... + + def getPolyN(self) -> int: ... + + def setPolyN(self, polyN: int) -> None: ... + + def getPolySigma(self) -> float: ... + + def setPolySigma(self, polySigma: float) -> None: ... + + def getFlags(self) -> int: ... + + def setFlags(self, flags: int) -> None: ... + + @classmethod + def create( + cls, + numLevels: int = ..., + pyrScale: float = ..., + fastPyramids: bool = ..., + winSize: int = ..., + numIters: int = ..., + polyN: int = ..., + polySigma: float = ..., + flags: int = ..., + ) -> FarnebackOpticalFlow: ... + + +class VariationalRefinement(DenseOpticalFlow): + # Functions + @typing.overload + def calcUV( + self, + I0: cv2.typing.MatLike, + I1: cv2.typing.MatLike, + flow_u: cv2.typing.MatLike, + flow_v: cv2.typing.MatLike, + ) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + ]: ... 
+ + @typing.overload + def calcUV(self, I0: UMat, I1: UMat, flow_u: UMat, flow_v: UMat) -> tuple[UMat, UMat]: ... + + def getFixedPointIterations(self) -> int: ... + + def setFixedPointIterations(self, val: int) -> None: ... + + def getSorIterations(self) -> int: ... + + def setSorIterations(self, val: int) -> None: ... + + def getOmega(self) -> float: ... + + def setOmega(self, val: float) -> None: ... + + def getAlpha(self) -> float: ... + + def setAlpha(self, val: float) -> None: ... + + def getDelta(self) -> float: ... + + def setDelta(self, val: float) -> None: ... + + def getGamma(self) -> float: ... + + def setGamma(self, val: float) -> None: ... + + @classmethod + def create(cls) -> VariationalRefinement: ... + + +class DISOpticalFlow(DenseOpticalFlow): + # Functions + def getFinestScale(self) -> int: ... + + def setFinestScale(self, val: int) -> None: ... + + def getPatchSize(self) -> int: ... + + def setPatchSize(self, val: int) -> None: ... + + def getPatchStride(self) -> int: ... + + def setPatchStride(self, val: int) -> None: ... + + def getGradientDescentIterations(self) -> int: ... + + def setGradientDescentIterations(self, val: int) -> None: ... + + def getVariationalRefinementIterations(self) -> int: ... + + def setVariationalRefinementIterations(self, val: int) -> None: ... + + def getVariationalRefinementAlpha(self) -> float: ... + + def setVariationalRefinementAlpha(self, val: float) -> None: ... + + def getVariationalRefinementDelta(self) -> float: ... + + def setVariationalRefinementDelta(self, val: float) -> None: ... + + def getVariationalRefinementGamma(self) -> float: ... + + def setVariationalRefinementGamma(self, val: float) -> None: ... + + def getUseMeanNormalization(self) -> bool: ... + + def setUseMeanNormalization(self, val: bool) -> None: ... + + def getUseSpatialPropagation(self) -> bool: ... + + def setUseSpatialPropagation(self, val: bool) -> None: ... + + @classmethod + def create(cls, preset: int = ...) -> DISOpticalFlow: ... + + +class SparsePyrLKOpticalFlow(SparseOpticalFlow): + # Functions + def getWinSize(self) -> cv2.typing.Size: ... + + def setWinSize(self, winSize: cv2.typing.Size) -> None: ... + + def getMaxLevel(self) -> int: ... + + def setMaxLevel(self, maxLevel: int) -> None: ... + + def getTermCriteria(self) -> cv2.typing.TermCriteria: ... + + def setTermCriteria(self, crit: cv2.typing.TermCriteria) -> None: ... + + def getFlags(self) -> int: ... + + def setFlags(self, flags: int) -> None: ... + + def getMinEigThreshold(self) -> float: ... + + def setMinEigThreshold(self, minEigThreshold: float) -> None: ... + + @classmethod + def create( + cls, winSize: cv2.typing.Size = ..., maxLevel: int = ..., crit: cv2.typing.TermCriteria = ..., + flags: int = ..., minEigThreshold: float = ..., + ) -> SparsePyrLKOpticalFlow: ... + + +# Functions +@typing.overload +def CamShift( + probImage: cv2.typing.MatLike, + window: cv2.typing.Rect, + criteria: cv2.typing.TermCriteria, +) -> tuple[ + cv2.typing.RotatedRect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def CamShift( + probImage: UMat, + window: cv2.typing.Rect, + criteria: cv2.typing.TermCriteria, +) -> tuple[ + cv2.typing.RotatedRect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def Canny( + image: cv2.typing.MatLike, threshold1: float, threshold2: float, edges: cv2.typing.MatLike | + None = ..., apertureSize: int = ..., L2gradient: bool = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def Canny( + image: UMat, + threshold1: float, + threshold2: float, + edges: UMat | None = ..., + apertureSize: int = ..., + L2gradient: bool = ..., +) -> UMat: ... + + +@typing.overload +def Canny( + dx: cv2.typing.MatLike, dy: cv2.typing.MatLike, threshold1: float, threshold2: float, + edges: cv2.typing.MatLike | None = ..., L2gradient: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def Canny( + dx: UMat, dy: UMat, threshold1: float, threshold2: float, + edges: UMat | None = ..., L2gradient: bool = ..., +) -> UMat: ... + + +@typing.overload +def EMD( + signature1: cv2.typing.MatLike, + signature2: cv2.typing.MatLike, + distType: int, + cost: cv2.typing.MatLike | None = ..., + lowerBound: float | None = ..., + flow: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def EMD( + signature1: UMat, + signature2: UMat, + distType: int, + cost: UMat | None = ..., + lowerBound: float | None = ..., + flow: UMat | None = ..., +) -> tuple[ + float, + float, + UMat, +]: ... + + +@typing.overload +def GaussianBlur( + src: cv2.typing.MatLike, + ksize: cv2.typing.Size, + sigmaX: float, + dst: cv2.typing.MatLike | None = ..., + sigmaY: float = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def GaussianBlur( + src: UMat, + ksize: cv2.typing.Size, + sigmaX: float, + dst: UMat | None = ..., + sigmaY: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def HoughCircles( + image: cv2.typing.MatLike, + method: int, + dp: float, + minDist: float, + circles: cv2.typing.MatLike | None = ..., + param1: float = ..., + param2: float = ..., + minRadius: int = ..., + maxRadius: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def HoughCircles( + image: UMat, + method: int, + dp: float, + minDist: float, + circles: UMat | None = ..., + param1: float = ..., + param2: float = ..., + minRadius: int = ..., + maxRadius: int = ..., +) -> UMat: ... + + +@typing.overload +def HoughLines( + image: cv2.typing.MatLike, + rho: float, + theta: float, + threshold: int, + lines: cv2.typing.MatLike | None = ..., + srn: float = ..., + stn: float = ..., + min_theta: float = ..., + max_theta: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def HoughLines( + image: UMat, + rho: float, + theta: float, + threshold: int, + lines: UMat | None = ..., + srn: float = ..., + stn: float = ..., + min_theta: float = ..., + max_theta: float = ..., +) -> UMat: ... + + +@typing.overload +def HoughLinesP( + image: cv2.typing.MatLike, + rho: float, + theta: float, + threshold: int, + lines: cv2.typing.MatLike | None = ..., + minLineLength: float = ..., + maxLineGap: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def HoughLinesP( + image: UMat, + rho: float, + theta: float, + threshold: int, + lines: UMat | None = ..., + minLineLength: float = ..., + maxLineGap: float = ..., +) -> UMat: ... + + +@typing.overload +def HoughLinesPointSet( + point: cv2.typing.MatLike, + lines_max: int, + threshold: int, + min_rho: float, + max_rho: float, + rho_step: float, + min_theta: float, + max_theta: float, + theta_step: float, + lines: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def HoughLinesPointSet( + point: UMat, + lines_max: int, + threshold: int, + min_rho: float, + max_rho: float, + rho_step: float, + min_theta: float, + max_theta: float, + theta_step: float, + lines: UMat | None = ..., +) -> UMat: ... 
+ + +@typing.overload +def HoughLinesWithAccumulator( + image: cv2.typing.MatLike, + rho: float, + theta: float, + threshold: int, + lines: cv2.typing.MatLike | None = ..., + srn: float = ..., + stn: float = ..., + min_theta: float = ..., + max_theta: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def HoughLinesWithAccumulator( + image: UMat, + rho: float, + theta: float, + threshold: int, + lines: UMat | None = ..., + srn: float = ..., + stn: float = ..., + min_theta: float = ..., + max_theta: float = ..., +) -> UMat: ... + + +@typing.overload +def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def HuMoments(m: cv2.typing.Moments, hu: UMat | None = ...) -> UMat: ... + + +@typing.overload +def LUT( + src: cv2.typing.MatLike, lut: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def LUT(src: UMat, lut: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def Laplacian( + src: cv2.typing.MatLike, ddepth: int, dst: cv2.typing.MatLike | None = ..., ksize: int = ..., + scale: float = ..., delta: float = ..., borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def Laplacian( + src: UMat, + ddepth: int, + dst: UMat | None = ..., + ksize: int = ..., + scale: float = ..., + delta: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def Mahalanobis(v1: cv2.typing.MatLike, v2: cv2.typing.MatLike, icovar: cv2.typing.MatLike) -> float: ... +@typing.overload +def Mahalanobis(v1: UMat, v2: UMat, icovar: UMat) -> float: ... + + +@typing.overload +def PCABackProject( + data: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + eigenvectors: cv2.typing.MatLike, + result: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def PCABackProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... + + +@typing.overload +def PCACompute( + data: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + eigenvectors: cv2.typing.MatLike | None = ..., + maxComponents: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def PCACompute( + data: UMat, + mean: UMat, + eigenvectors: UMat | None = ..., + maxComponents: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def PCACompute( + data: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + retainedVariance: float, + eigenvectors: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def PCACompute( + data: UMat, + mean: UMat, + retainedVariance: float, + eigenvectors: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def PCACompute2( + data: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + eigenvectors: cv2.typing.MatLike | None = ..., + eigenvalues: cv2.typing.MatLike | None = ..., + maxComponents: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def PCACompute2( + data: UMat, + mean: UMat, + eigenvectors: UMat | None = ..., + eigenvalues: UMat | None = ..., + maxComponents: int = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... 
+ + +@typing.overload +def PCACompute2( + data: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + retainedVariance: float, + eigenvectors: cv2.typing.MatLike | None = ..., + eigenvalues: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def PCACompute2( + data: UMat, + mean: UMat, + retainedVariance: float, + eigenvectors: UMat | None = ..., + eigenvalues: UMat | None = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def PCAProject( + data: cv2.typing.MatLike, mean: cv2.typing.MatLike, eigenvectors: cv2.typing.MatLike, + result: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def PCAProject(data: UMat, mean: UMat, eigenvectors: UMat, result: UMat | None = ...) -> UMat: ... + + +@typing.overload +def PSNR(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, R: float = ...) -> float: ... +@typing.overload +def PSNR(src1: UMat, src2: UMat, R: float = ...) -> float: ... + + +@typing.overload +def RQDecomp3x3( + src: cv2.typing.MatLike, + mtxR: cv2.typing.MatLike | None = ..., + mtxQ: cv2.typing.MatLike | None = ..., + Qx: cv2.typing.MatLike | None = ..., + Qy: cv2.typing.MatLike | None = ..., + Qz: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.Vec3d, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def RQDecomp3x3( + src: UMat, + mtxR: UMat | None = ..., + mtxQ: UMat | None = ..., + Qx: UMat | None = ..., + Qy: UMat | None = ..., + Qz: UMat | None = ..., +) -> tuple[ + cv2.typing.Vec3d, + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def Rodrigues( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + jacobian: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def Rodrigues(src: UMat, dst: UMat | None = ..., jacobian: UMat | None = ...) -> tuple[UMat, UMat]: ... + + +@typing.overload +def SVBackSubst( + w: cv2.typing.MatLike, + u: cv2.typing.MatLike, + vt: cv2.typing.MatLike, + rhs: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def SVBackSubst(w: UMat, u: UMat, vt: UMat, rhs: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def SVDecomp( + src: cv2.typing.MatLike, + w: cv2.typing.MatLike | None = ..., + u: cv2.typing.MatLike | None = ..., + vt: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def SVDecomp( + src: UMat, + w: UMat | None = ..., + u: UMat | None = ..., + vt: UMat | None = ..., + flags: int = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def Scharr( + src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., + scale: float = ..., delta: float = ..., borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def Scharr( + src: UMat, + ddepth: int, + dx: int, + dy: int, + dst: UMat | None = ..., + scale: float = ..., + delta: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def Sobel( + src: cv2.typing.MatLike, ddepth: int, dx: int, dy: int, dst: cv2.typing.MatLike | None = ..., + ksize: int = ..., scale: float = ..., delta: float = ..., borderType: int = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def Sobel( + src: UMat, + ddepth: int, + dx: int, + dy: int, + dst: UMat | None = ..., + ksize: int = ..., + scale: float = ..., + delta: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def absdiff( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def absdiff(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def accumulate( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike, + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def accumulate(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def accumulateProduct( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike, + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def accumulateProduct(src1: UMat, src2: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def accumulateSquare( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike, + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def accumulateSquare(src: UMat, dst: UMat, mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def accumulateWeighted( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float, + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def accumulateWeighted(src: UMat, dst: UMat, alpha: float, mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def adaptiveThreshold( + src: cv2.typing.MatLike, + maxValue: float, + adaptiveMethod: int, + thresholdType: int, + blockSize: int, + C: float, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def adaptiveThreshold( + src: UMat, + maxValue: float, + adaptiveMethod: int, + thresholdType: int, + blockSize: int, + C: float, + dst: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def add( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def add(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... + + +def addText( + img: cv2.typing.MatLike, + text: str, + org: cv2.typing.Point, + nameFont: str, + pointSize: int = ..., + color: cv2.typing.Scalar = ..., + weight: int = ..., + style: int = ..., + spacing: int = ..., +) -> None: ... + + +@typing.overload +def addWeighted( + src1: cv2.typing.MatLike, + alpha: float, + src2: cv2.typing.MatLike, + beta: float, + gamma: float, + dst: cv2.typing.MatLike | None = ..., + dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def addWeighted( + src1: UMat, + alpha: float, + src2: UMat, + beta: float, + gamma: float, + dst: UMat | None = ..., + dtype: int = ..., +) -> UMat: ... + + +@typing.overload +def applyColorMap( + src: cv2.typing.MatLike, + colormap: int, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def applyColorMap(src: UMat, colormap: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def applyColorMap( + src: cv2.typing.MatLike, userColor: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def applyColorMap(src: UMat, userColor: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def approxPolyDP( + curve: cv2.typing.MatLike, epsilon: float, closed: bool, + approxCurve: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def approxPolyDP(curve: UMat, epsilon: float, closed: bool, approxCurve: UMat | None = ...) -> UMat: ... + + +@typing.overload +def arcLength(curve: cv2.typing.MatLike, closed: bool) -> float: ... +@typing.overload +def arcLength(curve: UMat, closed: bool) -> float: ... + + +@typing.overload +def arrowedLine( + img: cv2.typing.MatLike, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + line_type: int = ..., + shift: int = ..., + tipLength: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def arrowedLine( + img: UMat, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + line_type: int = ..., + shift: int = ..., + tipLength: float = ..., +) -> UMat: ... + + +@typing.overload +def batchDistance( + src1: cv2.typing.MatLike, + src2: cv2.typing.MatLike, + dtype: int, + dist: cv2.typing.MatLike | None = ..., + nidx: cv2.typing.MatLike | None = ..., + normType: int = ..., + K: int = ..., + mask: cv2.typing.MatLike | None = ..., + update: int = ..., + crosscheck: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def batchDistance( + src1: UMat, + src2: UMat, + dtype: int, + dist: UMat | None = ..., + nidx: UMat | None = ..., + normType: int = ..., + K: int = ..., + mask: UMat | None = ..., + update: int = ..., + crosscheck: bool = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def bilateralFilter( + src: cv2.typing.MatLike, + d: int, + sigmaColor: float, + sigmaSpace: float, + dst: cv2.typing.MatLike | None = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def bilateralFilter( + src: UMat, + d: int, + sigmaColor: float, + sigmaSpace: float, + dst: UMat | None = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def bitwise_and( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def bitwise_and(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def bitwise_not( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def bitwise_not(src: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def bitwise_or( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def bitwise_or(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... + + +@typing.overload +def bitwise_xor( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def bitwise_xor(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ...) -> UMat: ... 
+ + +@typing.overload +def blendLinear( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, weights1: cv2.typing.MatLike, + weights2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def blendLinear(src1: UMat, src2: UMat, weights1: UMat, weights2: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def blur( + src: cv2.typing.MatLike, ksize: cv2.typing.Size, dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def blur( + src: UMat, + ksize: cv2.typing.Size, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + borderType: int = ..., +) -> UMat: ... + + +def borderInterpolate(p: int, len: int, borderType: int) -> int: ... + + +@typing.overload +def boundingRect(array: cv2.typing.MatLike) -> cv2.typing.Rect: ... +@typing.overload +def boundingRect(array: UMat) -> cv2.typing.Rect: ... + + +@typing.overload +def boxFilter( + src: cv2.typing.MatLike, + ddepth: int, + ksize: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + normalize: bool = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def boxFilter( + src: UMat, + ddepth: int, + ksize: cv2.typing.Size, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + normalize: bool = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def boxPoints(box: cv2.typing.RotatedRect, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def boxPoints(box: cv2.typing.RotatedRect, points: UMat | None = ...) -> UMat: ... + + +@typing.overload +def buildOpticalFlowPyramid( + img: cv2.typing.MatLike, + winSize: cv2.typing.Size, + maxLevel: int, + pyramid: typing.Sequence[cv2.typing.MatLike] | None = ..., + withDerivatives: bool = ..., + pyrBorder: int = ..., + derivBorder: int = ..., + tryReuseInputImage: bool = ..., +) -> tuple[ + int, + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def buildOpticalFlowPyramid( + img: UMat, + winSize: cv2.typing.Size, + maxLevel: int, + pyramid: typing.Sequence[UMat] | None = ..., + withDerivatives: bool = ..., + pyrBorder: int = ..., + derivBorder: int = ..., + tryReuseInputImage: bool = ..., +) -> tuple[ + int, + typing.Sequence[UMat], +]: ... + + +@typing.overload +def calcBackProject( + images: typing.Sequence[cv2.typing.MatLike], + channels: typing.Sequence[int], + hist: cv2.typing.MatLike, + ranges: typing.Sequence[float], + scale: float, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def calcBackProject( + images: typing.Sequence[UMat], + channels: typing.Sequence[int], + hist: UMat, + ranges: typing.Sequence[float], + scale: float, + dst: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def calcCovarMatrix( + samples: cv2.typing.MatLike, + mean: cv2.typing.MatLike, + flags: int, + covar: cv2.typing.MatLike | None = ..., + ctype: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calcCovarMatrix( + samples: UMat, + mean: UMat, + flags: int, + covar: UMat | None = ..., + ctype: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... 
+ + +@typing.overload +def calcHist( + images: typing.Sequence[cv2.typing.MatLike], + channels: typing.Sequence[int], + mask: cv2.typing.MatLike | None, + histSize: typing.Sequence[int], + ranges: typing.Sequence[float], + hist: cv2.typing.MatLike | None = ..., + accumulate: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def calcHist( + images: typing.Sequence[UMat], + channels: typing.Sequence[int], + mask: UMat | None, + histSize: typing.Sequence[int], + ranges: typing.Sequence[float], + hist: UMat | None = ..., + accumulate: bool = ..., +) -> UMat: ... + + +@typing.overload +def calcOpticalFlowFarneback( + prev: cv2.typing.MatLike, + next: cv2.typing.MatLike, + flow: cv2.typing.MatLike, + pyr_scale: float, + levels: int, + winsize: int, + iterations: int, + poly_n: int, + poly_sigma: float, + flags: int, +) -> cv2.typing.MatLike: ... + + +@typing.overload +def calcOpticalFlowFarneback( + prev: UMat, + next: UMat, + flow: UMat, + pyr_scale: float, + levels: int, + winsize: int, + iterations: int, + poly_n: int, + poly_sigma: float, + flags: int, +) -> UMat: ... + + +@typing.overload +def calcOpticalFlowPyrLK( + prevImg: cv2.typing.MatLike, + nextImg: cv2.typing.MatLike, + prevPts: cv2.typing.MatLike, + nextPts: cv2.typing.MatLike, + status: cv2.typing.MatLike | None = ..., + err: cv2.typing.MatLike | None = ..., + winSize: cv2.typing.Size = ..., + maxLevel: int = ..., + criteria: cv2.typing.TermCriteria = ..., + flags: int = ..., + minEigThreshold: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calcOpticalFlowPyrLK( + prevImg: UMat, + nextImg: UMat, + prevPts: UMat, + nextPts: UMat, + status: UMat | None = ..., + err: UMat | None = ..., + winSize: cv2.typing.Size = ..., + maxLevel: int = ..., + criteria: cv2.typing.TermCriteria = ..., + flags: int = ..., + minEigThreshold: float = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def calibrateCamera( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def calibrateCamera( + objectPoints: typing.Sequence[UMat], + imagePoints: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + cameraMatrix: UMat, + distCoeffs: UMat, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + typing.Sequence[UMat], + typing.Sequence[UMat], +]: ... 
+ + +@typing.overload +def calibrateCameraExtended( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., + stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., + perViewErrors: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calibrateCameraExtended( + objectPoints: typing.Sequence[UMat], + imagePoints: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + cameraMatrix: UMat, + distCoeffs: UMat, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + stdDeviationsIntrinsics: UMat | None = ..., + stdDeviationsExtrinsics: UMat | None = ..., + perViewErrors: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + typing.Sequence[UMat], + typing.Sequence[UMat], + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def calibrateCameraRO( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + iFixedPoint: int, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + newObjPoints: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calibrateCameraRO( + objectPoints: typing.Sequence[UMat], + imagePoints: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + iFixedPoint: int, + cameraMatrix: UMat, + distCoeffs: UMat, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + newObjPoints: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + typing.Sequence[UMat], + typing.Sequence[UMat], + UMat, +]: ... 
+ + +@typing.overload +def calibrateCameraROExtended( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + iFixedPoint: int, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + newObjPoints: cv2.typing.MatLike | None = ..., + stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., + stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., + stdDeviationsObjPoints: cv2.typing.MatLike | None = ..., + perViewErrors: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calibrateCameraROExtended( + objectPoints: typing.Sequence[UMat], + imagePoints: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + iFixedPoint: int, + cameraMatrix: UMat, + distCoeffs: UMat, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + newObjPoints: UMat | None = ..., + stdDeviationsIntrinsics: UMat | None = ..., + stdDeviationsExtrinsics: UMat | None = ..., + stdDeviationsObjPoints: UMat | None = ..., + perViewErrors: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + typing.Sequence[UMat], + typing.Sequence[UMat], + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def calibrateHandEye( + R_gripper2base: typing.Sequence[cv2.typing.MatLike], + t_gripper2base: typing.Sequence[cv2.typing.MatLike], + R_target2cam: typing.Sequence[cv2.typing.MatLike], + t_target2cam: typing.Sequence[cv2.typing.MatLike], + R_cam2gripper: cv2.typing.MatLike | None = ..., + t_cam2gripper: cv2.typing.MatLike | None = ..., + method: HandEyeCalibrationMethod = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def calibrateHandEye( + R_gripper2base: typing.Sequence[UMat], + t_gripper2base: typing.Sequence[UMat], + R_target2cam: typing.Sequence[UMat], + t_target2cam: typing.Sequence[UMat], + R_cam2gripper: UMat | None = ..., + t_cam2gripper: UMat | None = ..., + method: HandEyeCalibrationMethod = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def calibrateRobotWorldHandEye( + R_world2cam: typing.Sequence[cv2.typing.MatLike], + t_world2cam: typing.Sequence[cv2.typing.MatLike], + R_base2gripper: typing.Sequence[cv2.typing.MatLike], + t_base2gripper: typing.Sequence[cv2.typing.MatLike], + R_base2world: cv2.typing.MatLike | None = ..., + t_base2world: cv2.typing.MatLike | None = ..., + R_gripper2cam: cv2.typing.MatLike | None = ..., + t_gripper2cam: cv2.typing.MatLike | None = ..., + method: RobotWorldHandEyeCalibrationMethod = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... 
+ + +@typing.overload +def calibrateRobotWorldHandEye( + R_world2cam: typing.Sequence[UMat], + t_world2cam: typing.Sequence[UMat], + R_base2gripper: typing.Sequence[UMat], + t_base2gripper: typing.Sequence[UMat], + R_base2world: UMat | None = ..., + t_base2world: UMat | None = ..., + R_gripper2cam: UMat | None = ..., + t_gripper2cam: UMat | None = ..., + method: RobotWorldHandEyeCalibrationMethod = ..., +) -> tuple[ + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def calibrationMatrixValues( + cameraMatrix: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + apertureWidth: float, + apertureHeight: float, +) -> tuple[ + float, + float, + float, + cv2.typing.Point2d, + float, +]: ... + + +@typing.overload +def calibrationMatrixValues( + cameraMatrix: UMat, + imageSize: cv2.typing.Size, + apertureWidth: float, + apertureHeight: float, +) -> tuple[ + float, + float, + float, + cv2.typing.Point2d, + float, +]: ... + + +@typing.overload +def cartToPolar( + x: cv2.typing.MatLike, + y: cv2.typing.MatLike, + magnitude: cv2.typing.MatLike | None = ..., + angle: cv2.typing.MatLike | None = ..., + angleInDegrees: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def cartToPolar( + x: UMat, + y: UMat, + magnitude: UMat | None = ..., + angle: UMat | None = ..., + angleInDegrees: bool = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def checkChessboard(img: cv2.typing.MatLike, size: cv2.typing.Size) -> bool: ... +@typing.overload +def checkChessboard(img: UMat, size: cv2.typing.Size) -> bool: ... + + +def checkHardwareSupport(feature: int) -> bool: ... + + +@typing.overload +def checkRange( + a: cv2.typing.MatLike, + quiet: bool = ..., + minVal: float = ..., + maxVal: float = ..., +) -> tuple[ + bool, + cv2.typing.Point, +]: ... + + +@typing.overload +def checkRange( + a: UMat, + quiet: bool = ..., + minVal: float = ..., + maxVal: float = ..., +) -> tuple[ + bool, + cv2.typing.Point, +]: ... + + +@typing.overload +def circle( + img: cv2.typing.MatLike, center: cv2.typing.Point, radius: int, color: cv2.typing.Scalar, + thickness: int = ..., lineType: int = ..., shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def circle( + img: UMat, + center: cv2.typing.Point, + radius: int, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... + + +def clipLine( + imgRect: cv2.typing.Rect, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, +) -> tuple[ + bool, + cv2.typing.Point, + cv2.typing.Point, +]: ... + + +@typing.overload +def colorChange( + src: cv2.typing.MatLike, mask: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + red_mul: float = ..., green_mul: float = ..., blue_mul: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def colorChange( + src: UMat, + mask: UMat, + dst: UMat | None = ..., + red_mul: float = ..., + green_mul: float = ..., + blue_mul: float = ..., +) -> UMat: ... + + +@typing.overload +def compare( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, cmpop: int, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def compare(src1: UMat, src2: UMat, cmpop: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def compareHist(H1: cv2.typing.MatLike, H2: cv2.typing.MatLike, method: int) -> float: ... +@typing.overload +def compareHist(H1: UMat, H2: UMat, method: int) -> float: ... 
+ + +@typing.overload +def completeSymm(m: cv2.typing.MatLike, lowerToUpper: bool = ...) -> cv2.typing.MatLike: ... +@typing.overload +def completeSymm(m: UMat, lowerToUpper: bool = ...) -> UMat: ... + + +@typing.overload +def composeRT( + rvec1: cv2.typing.MatLike, + tvec1: cv2.typing.MatLike, + rvec2: cv2.typing.MatLike, + tvec2: cv2.typing.MatLike, + rvec3: cv2.typing.MatLike | None = ..., + tvec3: cv2.typing.MatLike | None = ..., + dr3dr1: cv2.typing.MatLike | None = ..., + dr3dt1: cv2.typing.MatLike | None = ..., + dr3dr2: cv2.typing.MatLike | None = ..., + dr3dt2: cv2.typing.MatLike | None = ..., + dt3dr1: cv2.typing.MatLike | None = ..., + dt3dt1: cv2.typing.MatLike | None = ..., + dt3dr2: cv2.typing.MatLike | None = ..., + dt3dt2: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def composeRT( + rvec1: UMat, + tvec1: UMat, + rvec2: UMat, + tvec2: UMat, + rvec3: UMat | None = ..., + tvec3: UMat | None = ..., + dr3dr1: UMat | None = ..., + dr3dt1: UMat | None = ..., + dr3dr2: UMat | None = ..., + dr3dt2: UMat | None = ..., + dt3dr1: UMat | None = ..., + dt3dt1: UMat | None = ..., + dt3dr2: UMat | None = ..., + dt3dt2: UMat | None = ..., +) -> tuple[ + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def computeCorrespondEpilines( + points: cv2.typing.MatLike, + whichImage: int, + F: cv2.typing.MatLike, + lines: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def computeCorrespondEpilines(points: UMat, whichImage: int, F: UMat, lines: UMat | None = ...) -> UMat: ... + + +@typing.overload +def computeECC( + templateImage: cv2.typing.MatLike, inputImage: cv2.typing.MatLike, + inputMask: cv2.typing.MatLike | None = ..., +) -> float: ... + + +@typing.overload +def computeECC(templateImage: UMat, inputImage: UMat, inputMask: UMat | None = ...) -> float: ... + + +@typing.overload +def connectedComponents( + image: cv2.typing.MatLike, + labels: cv2.typing.MatLike | None = ..., + connectivity: int = ..., + ltype: int = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def connectedComponents( + image: UMat, + labels: UMat | None = ..., + connectivity: int = ..., + ltype: int = ..., +) -> tuple[ + int, + UMat, +]: ... + + +@typing.overload +def connectedComponentsWithAlgorithm( + image: cv2.typing.MatLike, + connectivity: int, + ltype: int, + ccltype: int, + labels: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def connectedComponentsWithAlgorithm( + image: UMat, + connectivity: int, + ltype: int, + ccltype: int, + labels: UMat | None = ..., +) -> tuple[ + int, + UMat, +]: ... + + +@typing.overload +def connectedComponentsWithStats( + image: cv2.typing.MatLike, + labels: cv2.typing.MatLike | None = ..., + stats: cv2.typing.MatLike | None = ..., + centroids: cv2.typing.MatLike | None = ..., + connectivity: int = ..., + ltype: int = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... 
+ + +@typing.overload +def connectedComponentsWithStats( + image: UMat, + labels: UMat | None = ..., + stats: UMat | None = ..., + centroids: UMat | None = ..., + connectivity: int = ..., + ltype: int = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def connectedComponentsWithStatsWithAlgorithm( + image: cv2.typing.MatLike, + connectivity: int, + ltype: int, + ccltype: int, + labels: cv2.typing.MatLike | None = ..., + stats: cv2.typing.MatLike | None = ..., + centroids: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def connectedComponentsWithStatsWithAlgorithm( + image: UMat, + connectivity: int, + ltype: int, + ccltype: int, + labels: UMat | None = ..., + stats: UMat | None = ..., + centroids: UMat | None = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def contourArea(contour: cv2.typing.MatLike, oriented: bool = ...) -> float: ... +@typing.overload +def contourArea(contour: UMat, oriented: bool = ...) -> float: ... + + +@typing.overload +def convertFp16(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def convertFp16(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def convertMaps( + map1: cv2.typing.MatLike, + map2: cv2.typing.MatLike, + dstmap1type: int, + dstmap1: cv2.typing.MatLike | None = ..., + dstmap2: cv2.typing.MatLike | None = ..., + nninterpolation: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def convertMaps( + map1: UMat, + map2: UMat, + dstmap1type: int, + dstmap1: UMat | None = ..., + dstmap2: UMat | None = ..., + nninterpolation: bool = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def convertPointsFromHomogeneous( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def convertPointsFromHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def convertPointsToHomogeneous(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def convertPointsToHomogeneous(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def convertScaleAbs( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + alpha: float = ..., beta: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def convertScaleAbs(src: UMat, dst: UMat | None = ..., alpha: float = ..., beta: float = ...) -> UMat: ... + + +@typing.overload +def convexHull( + points: cv2.typing.MatLike, hull: cv2.typing.MatLike | None = ..., + clockwise: bool = ..., returnPoints: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def convexHull(points: UMat, hull: UMat | None = ..., clockwise: bool = ..., returnPoints: bool = ...) -> UMat: ... + + +@typing.overload +def convexityDefects( + contour: cv2.typing.MatLike, convexhull: cv2.typing.MatLike, + convexityDefects: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def convexityDefects(contour: UMat, convexhull: UMat, convexityDefects: UMat | None = ...) -> UMat: ... + + +@typing.overload +def copyMakeBorder( + src: cv2.typing.MatLike, + top: int, + bottom: int, + left: int, + right: int, + borderType: int, + dst: cv2.typing.MatLike | None = ..., + value: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def copyMakeBorder( + src: UMat, top: int, bottom: int, left: int, right: int, borderType: int, + dst: UMat | None = ..., value: cv2.typing.Scalar = ..., +) -> UMat: ... + + +@typing.overload +def copyTo( + src: cv2.typing.MatLike, mask: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def copyTo(src: UMat, mask: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def cornerEigenValsAndVecs( + src: cv2.typing.MatLike, + blockSize: int, + ksize: int, + dst: cv2.typing.MatLike | None = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cornerEigenValsAndVecs( + src: UMat, + blockSize: int, + ksize: int, + dst: UMat | None = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def cornerHarris( + src: cv2.typing.MatLike, + blockSize: int, + ksize: int, + k: float, + dst: cv2.typing.MatLike | None = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cornerHarris( + src: UMat, + blockSize: int, + ksize: int, + k: float, + dst: UMat | None = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def cornerMinEigenVal( + src: cv2.typing.MatLike, + blockSize: int, + dst: cv2.typing.MatLike | None = ..., + ksize: int = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cornerMinEigenVal( + src: UMat, + blockSize: int, + dst: UMat | None = ..., + ksize: int = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def cornerSubPix( + image: cv2.typing.MatLike, corners: cv2.typing.MatLike, winSize: cv2.typing.Size, + zeroZone: cv2.typing.Size, criteria: cv2.typing.TermCriteria, +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cornerSubPix( + image: UMat, + corners: UMat, + winSize: cv2.typing.Size, + zeroZone: cv2.typing.Size, + criteria: cv2.typing.TermCriteria, +) -> UMat: ... + + +@typing.overload +def correctMatches( + F: cv2.typing.MatLike, + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + newPoints1: cv2.typing.MatLike | None = ..., + newPoints2: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def correctMatches( + F: UMat, + points1: UMat, + points2: UMat, + newPoints1: UMat | None = ..., + newPoints2: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def countNonZero(src: cv2.typing.MatLike) -> int: ... +@typing.overload +def countNonZero(src: UMat) -> int: ... + + +def createAlignMTB(max_bits: int = ..., exclude_range: int = ..., cut: bool = ...) -> AlignMTB: ... + + +def createBackgroundSubtractorKNN( + history: int = ..., dist2Threshold: float = ..., + detectShadows: bool = ..., +) -> BackgroundSubtractorKNN: ... + + +def createBackgroundSubtractorMOG2( + history: int = ..., varThreshold: float = ..., + detectShadows: bool = ..., +) -> BackgroundSubtractorMOG2: ... + + +def createCLAHE(clipLimit: float = ..., tileGridSize: cv2.typing.Size = ...) -> CLAHE: ... + + +def createCalibrateDebevec(samples: int = ..., lambda_: float = ..., random: bool = ...) -> CalibrateDebevec: ... + + +def createCalibrateRobertson(max_iter: int = ..., threshold: float = ...) -> CalibrateRobertson: ... + + +def createGeneralizedHoughBallard() -> GeneralizedHoughBallard: ... + + +def createGeneralizedHoughGuil() -> GeneralizedHoughGuil: ... 
+ + +@typing.overload +def createHanningWindow( + winSize: cv2.typing.Size, + type: int, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def createHanningWindow(winSize: cv2.typing.Size, type: int, dst: UMat | None = ...) -> UMat: ... + + +def createLineSegmentDetector( + refine: int = ..., + scale: float = ..., + sigma_scale: float = ..., + quant: float = ..., + ang_th: float = ..., + log_eps: float = ..., + density_th: float = ..., + n_bins: int = ..., +) -> LineSegmentDetector: ... + + +def createMergeDebevec() -> MergeDebevec: ... + + +def createMergeMertens( + contrast_weight: float = ..., + saturation_weight: float = ..., + exposure_weight: float = ..., +) -> MergeMertens: ... + + +def createMergeRobertson() -> MergeRobertson: ... + + +def createTonemap(gamma: float = ...) -> Tonemap: ... + + +def createTonemapDrago(gamma: float = ..., saturation: float = ..., bias: float = ...) -> TonemapDrago: ... + + +def createTonemapMantiuk(gamma: float = ..., scale: float = ..., saturation: float = ...) -> TonemapMantiuk: ... + + +def createTonemapReinhard( + gamma: float = ..., + intensity: float = ..., + light_adapt: float = ..., + color_adapt: float = ..., +) -> TonemapReinhard: ... + + +def cubeRoot(val: float) -> float: ... + + +@typing.overload +def cvtColor( + src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | + None = ..., dstCn: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cvtColor(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ... + + +@typing.overload +def cvtColorTwoPlane( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, code: int, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def cvtColorTwoPlane(src1: UMat, src2: UMat, code: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def dct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@typing.overload +def dct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + + +@typing.overload +def decolor( + src: cv2.typing.MatLike, + grayscale: cv2.typing.MatLike | None = ..., + color_boost: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def decolor(src: UMat, grayscale: UMat | None = ..., color_boost: UMat | None = ...) -> tuple[UMat, UMat]: ... + + +@typing.overload +def decomposeEssentialMat( + E: cv2.typing.MatLike, + R1: cv2.typing.MatLike | None = ..., + R2: cv2.typing.MatLike | None = ..., + t: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def decomposeEssentialMat( + E: UMat, + R1: UMat | None = ..., + R2: UMat | None = ..., + t: UMat | None = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def decomposeHomographyMat( + H: cv2.typing.MatLike, + K: cv2.typing.MatLike, + rotations: typing.Sequence[cv2.typing.MatLike] | None = ..., + translations: typing.Sequence[cv2.typing.MatLike] | None = ..., + normals: typing.Sequence[cv2.typing.MatLike] | None = ..., +) -> tuple[ + int, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], +]: ... 
+ + +@typing.overload +def decomposeHomographyMat( + H: UMat, + K: UMat, + rotations: typing.Sequence[UMat] | None = ..., + translations: typing.Sequence[UMat] | None = ..., + normals: typing.Sequence[UMat] | None = ..., +) -> tuple[ + int, + typing.Sequence[UMat], + typing.Sequence[UMat], + typing.Sequence[UMat], +]: ... + + +@typing.overload +def decomposeProjectionMatrix( + projMatrix: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike | None = ..., + rotMatrix: cv2.typing.MatLike | None = ..., + transVect: cv2.typing.MatLike | None = ..., + rotMatrixX: cv2.typing.MatLike | None = ..., + rotMatrixY: cv2.typing.MatLike | None = ..., + rotMatrixZ: cv2.typing.MatLike | None = ..., + eulerAngles: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def decomposeProjectionMatrix( + projMatrix: UMat, + cameraMatrix: UMat | None = ..., + rotMatrix: UMat | None = ..., + transVect: UMat | None = ..., + rotMatrixX: UMat | None = ..., + rotMatrixY: UMat | None = ..., + rotMatrixZ: UMat | None = ..., + eulerAngles: UMat | None = ..., +) -> tuple[ + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def demosaicing( + src: cv2.typing.MatLike, code: int, dst: cv2.typing.MatLike | + None = ..., dstCn: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def demosaicing(src: UMat, code: int, dst: UMat | None = ..., dstCn: int = ...) -> UMat: ... + + +def denoise_TVL1( + observations: typing.Sequence[cv2.typing.MatLike], + result: cv2.typing.MatLike, + lambda_: float = ..., + niters: int = ..., +) -> None: ... + + +def destroyAllWindows() -> None: ... + + +def destroyWindow(winname: str) -> None: ... + + +@typing.overload +def detailEnhance( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + sigma_s: float = ..., + sigma_r: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def detailEnhance(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... + + +@typing.overload +def determinant(mtx: cv2.typing.MatLike) -> float: ... +@typing.overload +def determinant(mtx: UMat) -> float: ... + + +@typing.overload +def dft( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + flags: int = ..., nonzeroRows: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def dft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ... + + +@typing.overload +def dilate( + src: cv2.typing.MatLike, + kernel: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def dilate( + src: UMat, + kernel: UMat, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +def displayOverlay(winname: str, text: str, delayms: int = ...) -> None: ... + + +def displayStatusBar(winname: str, text: str, delayms: int = ...) -> None: ... + + +@typing.overload +def distanceTransform( + src: cv2.typing.MatLike, + distanceType: int, + maskSize: int, + dst: cv2.typing.MatLike | None = ..., + dstType: int = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def distanceTransform( + src: UMat, + distanceType: int, + maskSize: int, + dst: UMat | None = ..., + dstType: int = ..., +) -> UMat: ... + + +@typing.overload +def distanceTransformWithLabels( + src: cv2.typing.MatLike, + distanceType: int, + maskSize: int, + dst: cv2.typing.MatLike | None = ..., + labels: cv2.typing.MatLike | None = ..., + labelType: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def distanceTransformWithLabels( + src: UMat, + distanceType: int, + maskSize: int, + dst: UMat | None = ..., + labels: UMat | None = ..., + labelType: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def divSpectrums( + a: cv2.typing.MatLike, + b: cv2.typing.MatLike, + flags: int, + c: cv2.typing.MatLike | None = ..., + conjB: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def divSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ... + + +@typing.overload +def divide( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., scale: float = ..., dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def divide(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... + + +@typing.overload +def divide( + scale: float, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def divide(scale: float, src2: UMat, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... + + +@typing.overload +def drawChessboardCorners( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + corners: cv2.typing.MatLike, + patternWasFound: bool, +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawChessboardCorners(image: UMat, patternSize: cv2.typing.Size, corners: UMat, patternWasFound: bool) -> UMat: ... + + +@typing.overload +def drawContours( + image: cv2.typing.MatLike, + contours: typing.Sequence[cv2.typing.MatLike], + contourIdx: int, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + hierarchy: cv2.typing.MatLike | None = ..., + maxLevel: int = ..., + offset: cv2.typing.Point = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawContours( + image: UMat, + contours: typing.Sequence[UMat], + contourIdx: int, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + hierarchy: UMat | None = ..., + maxLevel: int = ..., + offset: cv2.typing.Point = ..., +) -> UMat: ... + + +@typing.overload +def drawFrameAxes( + image: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike, + tvec: cv2.typing.MatLike, + length: float, + thickness: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawFrameAxes( + image: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat, + tvec: UMat, + length: float, + thickness: int = ..., +) -> UMat: ... + + +@typing.overload +def drawKeypoints( + image: cv2.typing.MatLike, + keypoints: typing.Sequence[KeyPoint], + outImage: cv2.typing.MatLike, + color: cv2.typing.Scalar = ..., + flags: DrawMatchesFlags = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawKeypoints( + image: UMat, + keypoints: typing.Sequence[KeyPoint], + outImage: UMat, + color: cv2.typing.Scalar = ..., + flags: DrawMatchesFlags = ..., +) -> UMat: ... 
+ + +@typing.overload +def drawMarker( + img: cv2.typing.MatLike, + position: cv2.typing.Point, + color: cv2.typing.Scalar, + markerType: int = ..., + markerSize: int = ..., + thickness: int = ..., + line_type: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawMarker( + img: UMat, + position: cv2.typing.Point, + color: cv2.typing.Scalar, + markerType: int = ..., + markerSize: int = ..., + thickness: int = ..., + line_type: int = ..., +) -> UMat: ... + + +@typing.overload +def drawMatches( + img1: cv2.typing.MatLike, + keypoints1: typing.Sequence[KeyPoint], + img2: cv2.typing.MatLike, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[DMatch], + outImg: cv2.typing.MatLike, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[str] = ..., + flags: DrawMatchesFlags = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawMatches( + img1: UMat, + keypoints1: typing.Sequence[KeyPoint], + img2: UMat, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[DMatch], + outImg: UMat, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[str] = ..., + flags: DrawMatchesFlags = ..., +) -> UMat: ... + + +@typing.overload +def drawMatches( + img1: cv2.typing.MatLike, + keypoints1: typing.Sequence[KeyPoint], + img2: cv2.typing.MatLike, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[DMatch], + outImg: cv2.typing.MatLike, + matchesThickness: int, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[str] = ..., + flags: DrawMatchesFlags = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawMatches( + img1: UMat, + keypoints1: typing.Sequence[KeyPoint], + img2: UMat, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[DMatch], + outImg: UMat, + matchesThickness: int, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[str] = ..., + flags: DrawMatchesFlags = ..., +) -> UMat: ... + + +@typing.overload +def drawMatchesKnn( + img1: cv2.typing.MatLike, + keypoints1: typing.Sequence[KeyPoint], + img2: cv2.typing.MatLike, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[typing.Sequence[DMatch]], + outImg: cv2.typing.MatLike, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[typing.Sequence[str]] = ..., + flags: DrawMatchesFlags = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def drawMatchesKnn( + img1: UMat, + keypoints1: typing.Sequence[KeyPoint], + img2: UMat, + keypoints2: typing.Sequence[KeyPoint], + matches1to2: typing.Sequence[typing.Sequence[DMatch]], + outImg: UMat, + matchColor: cv2.typing.Scalar = ..., + singlePointColor: cv2.typing.Scalar = ..., + matchesMask: typing.Sequence[typing.Sequence[str]] = ..., + flags: DrawMatchesFlags = ..., +) -> UMat: ... + + +@typing.overload +def edgePreservingFilter( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + flags: int = ..., + sigma_s: float = ..., + sigma_r: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def edgePreservingFilter( + src: UMat, + dst: UMat | None = ..., + flags: int = ..., + sigma_s: float = ..., + sigma_r: float = ..., +) -> UMat: ... 
+ + +@typing.overload +def eigen( + src: cv2.typing.MatLike, + eigenvalues: cv2.typing.MatLike | None = ..., + eigenvectors: cv2.typing.MatLike | None = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def eigen(src: UMat, eigenvalues: UMat | None = ..., eigenvectors: UMat | None = ...) -> tuple[bool, UMat, UMat]: ... + + +@typing.overload +def eigenNonSymmetric( + src: cv2.typing.MatLike, + eigenvalues: cv2.typing.MatLike | None = ..., + eigenvectors: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def eigenNonSymmetric( + src: UMat, + eigenvalues: UMat | None = ..., + eigenvectors: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def ellipse( + img: cv2.typing.MatLike, + center: cv2.typing.Point, + axes: cv2.typing.Size, + angle: float, + startAngle: float, + endAngle: float, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def ellipse( + img: UMat, + center: cv2.typing.Point, + axes: cv2.typing.Size, + angle: float, + startAngle: float, + endAngle: float, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... + + +@typing.overload +def ellipse( + img: cv2.typing.MatLike, + box: cv2.typing.RotatedRect, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def ellipse( + img: UMat, + box: cv2.typing.RotatedRect, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., +) -> UMat: ... + + +def ellipse2Poly( + center: cv2.typing.Point, axes: cv2.typing.Size, angle: int, arcStart: int, + arcEnd: int, delta: int, +) -> typing.Sequence[cv2.typing.Point]: ... + + +def empty_array_desc() -> GArrayDesc: ... + + +def empty_gopaque_desc() -> GOpaqueDesc: ... + + +def empty_scalar_desc() -> GScalarDesc: ... + + +@typing.overload +def equalizeHist(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def equalizeHist(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def erode( + src: cv2.typing.MatLike, + kernel: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def erode( + src: UMat, + kernel: UMat, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +@typing.overload +def estimateAffine2D( + from_: cv2.typing.MatLike, + to: cv2.typing.MatLike, + inliers: cv2.typing.MatLike | None = ..., + method: int = ..., + ransacReprojThreshold: float = ..., + maxIters: int = ..., + confidence: float = ..., + refineIters: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateAffine2D( + from_: UMat, + to: UMat, + inliers: UMat | None = ..., + method: int = ..., + ransacReprojThreshold: float = ..., + maxIters: int = ..., + confidence: float = ..., + refineIters: int = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... 
+ + +@typing.overload +def estimateAffine2D( + pts1: cv2.typing.MatLike, + pts2: cv2.typing.MatLike, + params: UsacParams, + inliers: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateAffine2D( + pts1: UMat, + pts2: UMat, + params: UsacParams, + inliers: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def estimateAffine3D( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike, + out: cv2.typing.MatLike | None = ..., + inliers: cv2.typing.MatLike | None = ..., + ransacThreshold: float = ..., + confidence: float = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateAffine3D( + src: UMat, + dst: UMat, + out: UMat | None = ..., + inliers: UMat | None = ..., + ransacThreshold: float = ..., + confidence: float = ..., +) -> tuple[ + int, + UMat, + UMat, +]: ... + + +@typing.overload +def estimateAffine3D( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike, + force_rotation: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + float, +]: ... + + +@typing.overload +def estimateAffine3D(src: UMat, dst: UMat, force_rotation: bool = ...) -> tuple[cv2.typing.MatLike, float]: ... + + +@typing.overload +def estimateAffinePartial2D( + from_: cv2.typing.MatLike, + to: cv2.typing.MatLike, + inliers: cv2.typing.MatLike | None = ..., + method: int = ..., + ransacReprojThreshold: float = ..., + maxIters: int = ..., + confidence: float = ..., + refineIters: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateAffinePartial2D( + from_: UMat, + to: UMat, + inliers: UMat | None = ..., + method: int = ..., + ransacReprojThreshold: float = ..., + maxIters: int = ..., + confidence: float = ..., + refineIters: int = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def estimateChessboardSharpness( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + corners: cv2.typing.MatLike, + rise_distance: float = ..., + vertical: bool = ..., + sharpness: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.Scalar, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateChessboardSharpness( + image: UMat, + patternSize: cv2.typing.Size, + corners: UMat, + rise_distance: float = ..., + vertical: bool = ..., + sharpness: UMat | None = ..., +) -> tuple[ + cv2.typing.Scalar, + UMat, +]: ... + + +@typing.overload +def estimateTranslation3D( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike, + out: cv2.typing.MatLike | None = ..., + inliers: cv2.typing.MatLike | None = ..., + ransacThreshold: float = ..., + confidence: float = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def estimateTranslation3D( + src: UMat, + dst: UMat, + out: UMat | None = ..., + inliers: UMat | None = ..., + ransacThreshold: float = ..., + confidence: float = ..., +) -> tuple[ + int, + UMat, + UMat, +]: ... + + +@typing.overload +def exp(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def exp(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def extractChannel(src: cv2.typing.MatLike, coi: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def extractChannel(src: UMat, coi: int, dst: UMat | None = ...) -> UMat: ... + + +def fastAtan2(y: float, x: float) -> float: ... 
+ + +@typing.overload +def fastNlMeansDenoising( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + h: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoising( + src: UMat, + dst: UMat | None = ..., + h: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> UMat: ... + + +@typing.overload +def fastNlMeansDenoising( + src: cv2.typing.MatLike, + h: typing.Sequence[float], + dst: cv2.typing.MatLike | None = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., + normType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoising( + src: UMat, + h: typing.Sequence[float], + dst: UMat | None = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., + normType: int = ..., +) -> UMat: ... + + +@typing.overload +def fastNlMeansDenoisingColored( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + h: float = ..., + hColor: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoisingColored( + src: UMat, + dst: UMat | None = ..., + h: float = ..., + hColor: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> UMat: ... + + +@typing.overload +def fastNlMeansDenoisingColoredMulti( + srcImgs: typing.Sequence[cv2.typing.MatLike], + imgToDenoiseIndex: int, + temporalWindowSize: int, + dst: cv2.typing.MatLike | None = ..., + h: float = ..., + hColor: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoisingColoredMulti( + srcImgs: typing.Sequence[UMat], + imgToDenoiseIndex: int, + temporalWindowSize: int, + dst: UMat | None = ..., + h: float = ..., + hColor: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> UMat: ... + + +@typing.overload +def fastNlMeansDenoisingMulti( + srcImgs: typing.Sequence[cv2.typing.MatLike], + imgToDenoiseIndex: int, + temporalWindowSize: int, + dst: cv2.typing.MatLike | None = ..., + h: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoisingMulti( + srcImgs: typing.Sequence[UMat], + imgToDenoiseIndex: int, + temporalWindowSize: int, + dst: UMat | None = ..., + h: float = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., +) -> UMat: ... + + +@typing.overload +def fastNlMeansDenoisingMulti( + srcImgs: typing.Sequence[cv2.typing.MatLike], + imgToDenoiseIndex: int, + temporalWindowSize: int, + h: typing.Sequence[float], + dst: cv2.typing.MatLike | None = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., + normType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fastNlMeansDenoisingMulti( + srcImgs: typing.Sequence[UMat], + imgToDenoiseIndex: int, + temporalWindowSize: int, + h: typing.Sequence[float], + dst: UMat | None = ..., + templateWindowSize: int = ..., + searchWindowSize: int = ..., + normType: int = ..., +) -> UMat: ... + + +@typing.overload +def fillConvexPoly( + img: cv2.typing.MatLike, + points: cv2.typing.MatLike, + color: cv2.typing.Scalar, + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def fillConvexPoly( + img: UMat, + points: UMat, + color: cv2.typing.Scalar, + lineType: int = ..., + shift: int = ..., +) -> UMat: ... + + +@typing.overload +def fillPoly( + img: cv2.typing.MatLike, + pts: typing.Sequence[cv2.typing.MatLike], + color: cv2.typing.Scalar, + lineType: int = ..., + shift: int = ..., + offset: cv2.typing.Point = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fillPoly( + img: UMat, + pts: typing.Sequence[UMat], + color: cv2.typing.Scalar, + lineType: int = ..., + shift: int = ..., + offset: cv2.typing.Point = ..., +) -> UMat: ... + + +@typing.overload +def filter2D( + src: cv2.typing.MatLike, + ddepth: int, + kernel: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + delta: float = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def filter2D( + src: UMat, + ddepth: int, + kernel: UMat, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + delta: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def filterHomographyDecompByVisibleRefpoints( + rotations: typing.Sequence[cv2.typing.MatLike], + normals: typing.Sequence[cv2.typing.MatLike], + beforePoints: cv2.typing.MatLike, + afterPoints: cv2.typing.MatLike, + possibleSolutions: cv2.typing.MatLike | None = ..., + pointsMask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def filterHomographyDecompByVisibleRefpoints( + rotations: typing.Sequence[UMat], + normals: typing.Sequence[UMat], + beforePoints: UMat, + afterPoints: UMat, + possibleSolutions: UMat | None = ..., + pointsMask: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def filterSpeckles( + img: cv2.typing.MatLike, + newVal: float, + maxSpeckleSize: int, + maxDiff: float, + buf: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def filterSpeckles( + img: UMat, + newVal: float, + maxSpeckleSize: int, + maxDiff: float, + buf: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def find4QuadCornerSubpix( + img: cv2.typing.MatLike, + corners: cv2.typing.MatLike, + region_size: cv2.typing.Size, +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def find4QuadCornerSubpix(img: UMat, corners: UMat, region_size: cv2.typing.Size) -> tuple[bool, UMat]: ... + + +@typing.overload +def findChessboardCorners( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + corners: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findChessboardCorners( + image: UMat, + patternSize: cv2.typing.Size, + corners: UMat | None = ..., + flags: int = ..., +) -> tuple[ + bool, + UMat, +]: ... + + +@typing.overload +def findChessboardCornersSB( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + corners: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findChessboardCornersSB( + image: UMat, + patternSize: cv2.typing.Size, + corners: UMat | None = ..., + flags: int = ..., +) -> tuple[ + bool, + UMat, +]: ... 
+ + +@typing.overload +def findChessboardCornersSBWithMeta( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + flags: int, + corners: cv2.typing.MatLike | None = ..., + meta: cv2.typing.MatLike | None = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findChessboardCornersSBWithMeta( + image: UMat, + patternSize: cv2.typing.Size, + flags: int, + corners: UMat | None = ..., + meta: UMat | None = ..., +) -> tuple[ + bool, + UMat, + UMat, +]: ... + + +@typing.overload +def findCirclesGrid( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + flags: int, + blobDetector: cv2.typing.FeatureDetector, + parameters: CirclesGridFinderParameters, + centers: cv2.typing.MatLike | None = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findCirclesGrid( + image: UMat, + patternSize: cv2.typing.Size, + flags: int, + blobDetector: cv2.typing.FeatureDetector, + parameters: CirclesGridFinderParameters, + centers: UMat | None = ..., +) -> tuple[ + bool, + UMat, +]: ... + + +@typing.overload +def findCirclesGrid( + image: cv2.typing.MatLike, + patternSize: cv2.typing.Size, + centers: cv2.typing.MatLike | None = ..., + flags: int = ..., + blobDetector: cv2.typing.FeatureDetector = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findCirclesGrid( + image: UMat, + patternSize: cv2.typing.Size, + centers: UMat | None = ..., + flags: int = ..., + blobDetector: cv2.typing.FeatureDetector = ..., +) -> tuple[ + bool, + UMat, +]: ... + + +@typing.overload +def findContours( + image: cv2.typing.MatLike, + mode: int, + method: int, + contours: typing.Sequence[cv2.typing.MatLike] | None = ..., + hierarchy: cv2.typing.MatLike | None = ..., + offset: cv2.typing.Point = ..., +) -> tuple[ + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findContours( + image: UMat, + mode: int, + method: int, + contours: typing.Sequence[UMat] | None = ..., + hierarchy: UMat | None = ..., + offset: cv2.typing.Point = ..., +) -> tuple[ + typing.Sequence[UMat], + UMat, +]: ... + + +@typing.overload +def findEssentialMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + method: int = ..., + prob: float = ..., + threshold: float = ..., + maxIters: int = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findEssentialMat( + points1: UMat, + points2: UMat, + cameraMatrix: UMat, + method: int = ..., + prob: float = ..., + threshold: float = ..., + maxIters: int = ..., + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findEssentialMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + focal: float = ..., + pp: cv2.typing.Point2d = ..., + method: int = ..., + prob: float = ..., + threshold: float = ..., + maxIters: int = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findEssentialMat( + points1: UMat, + points2: UMat, + focal: float = ..., + pp: cv2.typing.Point2d = ..., + method: int = ..., + prob: float = ..., + threshold: float = ..., + maxIters: int = ..., + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... 
+ + +@typing.overload +def findEssentialMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + method: int = ..., + prob: float = ..., + threshold: float = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findEssentialMat( + points1: UMat, + points2: UMat, + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + method: int = ..., + prob: float = ..., + threshold: float = ..., + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findEssentialMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + dist_coeff1: cv2.typing.MatLike, + dist_coeff2: cv2.typing.MatLike, + params: UsacParams, + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findEssentialMat( + points1: UMat, + points2: UMat, + cameraMatrix1: UMat, + cameraMatrix2: UMat, + dist_coeff1: UMat, + dist_coeff2: UMat, + params: UsacParams, + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + method: int, + ransacReprojThreshold: float, + confidence: float, + maxIters: int, + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: UMat, + points2: UMat, + method: int, + ransacReprojThreshold: float, + confidence: float, + maxIters: int, + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + method: int = ..., + ransacReprojThreshold: float = ..., + confidence: float = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: UMat, + points2: UMat, + method: int = ..., + ransacReprojThreshold: float = ..., + confidence: float = ..., + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + params: UsacParams, + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findFundamentalMat( + points1: UMat, + points2: UMat, + params: UsacParams, + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findHomography( + srcPoints: cv2.typing.MatLike, + dstPoints: cv2.typing.MatLike, + method: int = ..., + ransacReprojThreshold: float = ..., + mask: cv2.typing.MatLike | None = ..., + maxIters: int = ..., + confidence: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findHomography( + srcPoints: UMat, + dstPoints: UMat, + method: int = ..., + ransacReprojThreshold: float = ..., + mask: UMat | None = ..., + maxIters: int = ..., + confidence: float = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... 
+ + +@typing.overload +def findHomography( + srcPoints: cv2.typing.MatLike, + dstPoints: cv2.typing.MatLike, + params: UsacParams, + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findHomography( + srcPoints: UMat, + dstPoints: UMat, + params: UsacParams, + mask: UMat | None = ..., +) -> tuple[ + cv2.typing.MatLike, + UMat, +]: ... + + +@typing.overload +def findNonZero(src: cv2.typing.MatLike, idx: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def findNonZero(src: UMat, idx: UMat | None = ...) -> UMat: ... + + +@typing.overload +def findTransformECC( + templateImage: cv2.typing.MatLike, + inputImage: cv2.typing.MatLike, + warpMatrix: cv2.typing.MatLike, + motionType: int, + criteria: cv2.typing.TermCriteria, + inputMask: cv2.typing.MatLike, + gaussFiltSize: int, +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findTransformECC( + templateImage: UMat, + inputImage: UMat, + warpMatrix: UMat, + motionType: int, + criteria: cv2.typing.TermCriteria, + inputMask: UMat, + gaussFiltSize: int, +) -> tuple[ + float, + UMat, +]: ... + + +@typing.overload +def findTransformECC( + templateImage: cv2.typing.MatLike, + inputImage: cv2.typing.MatLike, + warpMatrix: cv2.typing.MatLike, + motionType: int = ..., + criteria: cv2.typing.TermCriteria = ..., + inputMask: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def findTransformECC( + templateImage: UMat, + inputImage: UMat, + warpMatrix: UMat, + motionType: int = ..., + criteria: cv2.typing.TermCriteria = ..., + inputMask: UMat | None = ..., +) -> tuple[ + float, + UMat, +]: ... + + +@typing.overload +def fitEllipse(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@typing.overload +def fitEllipse(points: UMat) -> cv2.typing.RotatedRect: ... + + +@typing.overload +def fitEllipseAMS(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@typing.overload +def fitEllipseAMS(points: UMat) -> cv2.typing.RotatedRect: ... + + +@typing.overload +def fitEllipseDirect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@typing.overload +def fitEllipseDirect(points: UMat) -> cv2.typing.RotatedRect: ... + + +@typing.overload +def fitLine( + points: cv2.typing.MatLike, distType: int, param: float, reps: float, + aeps: float, line: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def fitLine(points: UMat, distType: int, param: float, reps: float, aeps: float, line: UMat | None = ...) -> UMat: ... + + +@typing.overload +def flip(src: cv2.typing.MatLike, flipCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def flip(src: UMat, flipCode: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def flipND(src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def flipND(src: UMat, axis: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def floodFill( + image: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + seedPoint: cv2.typing.Point, + newVal: cv2.typing.Scalar, + loDiff: cv2.typing.Scalar = ..., + upDiff: cv2.typing.Scalar = ..., + flags: int = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.Rect, +]: ... 
+ + +@typing.overload +def floodFill( + image: UMat, + mask: UMat, + seedPoint: cv2.typing.Point, + newVal: cv2.typing.Scalar, + loDiff: cv2.typing.Scalar = ..., + upDiff: cv2.typing.Scalar = ..., + flags: int = ..., +) -> tuple[ + int, + UMat, + UMat, + cv2.typing.Rect, +]: ... + + +@typing.overload +def gemm( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, alpha: float, src3: cv2.typing.MatLike, + beta: float, dst: cv2.typing.MatLike | None = ..., flags: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def gemm( + src1: UMat, + src2: UMat, + alpha: float, + src3: UMat, + beta: float, + dst: UMat | None = ..., + flags: int = ..., +) -> UMat: ... + + +@typing.overload +def getAffineTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@typing.overload +def getAffineTransform(src: UMat, dst: UMat) -> cv2.typing.MatLike: ... + + +def getBuildInformation() -> str: ... + + +def getCPUFeaturesLine() -> str: ... + + +def getCPUTickCount() -> int: ... + + +@typing.overload +def getDefaultNewCameraMatrix( + cameraMatrix: cv2.typing.MatLike, + imgsize: cv2.typing.Size = ..., + centerPrincipalPoint: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def getDefaultNewCameraMatrix( + cameraMatrix: UMat, imgsize: cv2.typing.Size = ..., + centerPrincipalPoint: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def getDerivKernels( + dx: int, + dy: int, + ksize: int, + kx: cv2.typing.MatLike | None = ..., + ky: cv2.typing.MatLike | None = ..., + normalize: bool = ..., + ktype: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def getDerivKernels( + dx: int, + dy: int, + ksize: int, + kx: UMat | None = ..., + ky: UMat | None = ..., + normalize: bool = ..., + ktype: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +def getFontScaleFromHeight(fontFace: int, pixelHeight: int, thickness: int = ...) -> float: ... + + +def getGaborKernel( + ksize: cv2.typing.Size, + sigma: float, + theta: float, + lambd: float, + gamma: float, + psi: float = ..., + ktype: int = ..., +) -> cv2.typing.MatLike: ... + + +def getGaussianKernel(ksize: int, sigma: float, ktype: int = ...) -> cv2.typing.MatLike: ... + + +def getHardwareFeatureName(feature: int) -> str: ... + + +def getLogLevel() -> int: ... + + +def getNumThreads() -> int: ... + + +def getNumberOfCPUs() -> int: ... + + +def getOptimalDFTSize(vecsize: int) -> int: ... + + +@typing.overload +def getOptimalNewCameraMatrix( + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + alpha: float, + newImgSize: cv2.typing.Size = ..., + centerPrincipalPoint: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.Rect, +]: ... + + +@typing.overload +def getOptimalNewCameraMatrix( + cameraMatrix: UMat, + distCoeffs: UMat, + imageSize: cv2.typing.Size, + alpha: float, + newImgSize: cv2.typing.Size = ..., + centerPrincipalPoint: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.Rect, +]: ... + + +@typing.overload +def getPerspectiveTransform( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike, + solveMethod: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def getPerspectiveTransform(src: UMat, dst: UMat, solveMethod: int = ...) -> cv2.typing.MatLike: ... + + +@typing.overload +def getRectSubPix( + image: cv2.typing.MatLike, patchSize: cv2.typing.Size, center: cv2.typing.Point2f, + patch: cv2.typing.MatLike | None = ..., patchType: int = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def getRectSubPix( + image: UMat, + patchSize: cv2.typing.Size, + center: cv2.typing.Point2f, + patch: UMat | None = ..., + patchType: int = ..., +) -> UMat: ... + + +def getRotationMatrix2D(center: cv2.typing.Point2f, angle: float, scale: float) -> cv2.typing.MatLike: ... + + +def getStructuringElement(shape: int, ksize: cv2.typing.Size, anchor: cv2.typing.Point = ...) -> cv2.typing.MatLike: ... + + +def getTextSize(text: str, fontFace: int, fontScale: float, thickness: int) -> tuple[cv2.typing.Size, int]: ... + + +def getThreadNum() -> int: ... + + +def getTickCount() -> int: ... + + +def getTickFrequency() -> float: ... + + +def getTrackbarPos(trackbarname: str, winname: str) -> int: ... + + +def getValidDisparityROI( + roi1: cv2.typing.Rect, + roi2: cv2.typing.Rect, + minDisparity: int, + numberOfDisparities: int, + blockSize: int, +) -> cv2.typing.Rect: ... + + +def getVersionMajor() -> int: ... + + +def getVersionMinor() -> int: ... + + +def getVersionRevision() -> int: ... + + +def getVersionString() -> str: ... + + +def getWindowImageRect(winname: str) -> cv2.typing.Rect: ... + + +def getWindowProperty(winname: str, prop_id: int) -> float: ... + + +@typing.overload +def goodFeaturesToTrack( + image: cv2.typing.MatLike, + maxCorners: int, + qualityLevel: float, + minDistance: float, + corners: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., + blockSize: int = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def goodFeaturesToTrack( + image: UMat, + maxCorners: int, + qualityLevel: float, + minDistance: float, + corners: UMat | None = ..., + mask: UMat | None = ..., + blockSize: int = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> UMat: ... + + +@typing.overload +def goodFeaturesToTrack( + image: cv2.typing.MatLike, + maxCorners: int, + qualityLevel: float, + minDistance: float, + mask: cv2.typing.MatLike, + blockSize: int, + gradientSize: int, + corners: cv2.typing.MatLike | None = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def goodFeaturesToTrack( + image: UMat, + maxCorners: int, + qualityLevel: float, + minDistance: float, + mask: UMat, + blockSize: int, + gradientSize: int, + corners: UMat | None = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> UMat: ... + + +@typing.overload +def goodFeaturesToTrackWithQuality( + image: cv2.typing.MatLike, + maxCorners: int, + qualityLevel: float, + minDistance: float, + mask: cv2.typing.MatLike, + corners: cv2.typing.MatLike | None = ..., + cornersQuality: cv2.typing.MatLike | None = ..., + blockSize: int = ..., + gradientSize: int = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def goodFeaturesToTrackWithQuality( + image: UMat, + maxCorners: int, + qualityLevel: float, + minDistance: float, + mask: UMat, + corners: UMat | None = ..., + cornersQuality: UMat | None = ..., + blockSize: int = ..., + gradientSize: int = ..., + useHarrisDetector: bool = ..., + k: float = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def grabCut( + img: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + rect: cv2.typing.Rect, + bgdModel: cv2.typing.MatLike, + fgdModel: cv2.typing.MatLike, + iterCount: int, + mode: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... 
+ + +@typing.overload +def grabCut( + img: UMat, + mask: UMat, + rect: cv2.typing.Rect, + bgdModel: UMat, + fgdModel: UMat, + iterCount: int, + mode: int = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +def groupRectangles( + rectList: typing.Sequence[cv2.typing.Rect], + groupThreshold: int, + eps: float = ..., +) -> tuple[ + typing.Sequence[cv2.typing.Rect], + typing.Sequence[int], +]: ... + + +def haveImageReader(filename: str) -> bool: ... + + +def haveImageWriter(filename: str) -> bool: ... + + +def haveOpenVX() -> bool: ... + + +@typing.overload +def hconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def hconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def idct(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ... +@typing.overload +def idct(src: UMat, dst: UMat | None = ..., flags: int = ...) -> UMat: ... + + +@typing.overload +def idft( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + flags: int = ..., nonzeroRows: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def idft(src: UMat, dst: UMat | None = ..., flags: int = ..., nonzeroRows: int = ...) -> UMat: ... + + +@typing.overload +def illuminationChange( + src: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + alpha: float = ..., + beta: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def illuminationChange( + src: UMat, + mask: UMat, + dst: UMat | None = ..., + alpha: float = ..., + beta: float = ..., +) -> UMat: ... + + +def imcount(filename: str, flags: int = ...) -> int: ... + + +@typing.overload +def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ... +@typing.overload +def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ... + + +@typing.overload +def imdecodemulti( + buf: cv2.typing.MatLike, + flags: int, + mats: typing.Sequence[cv2.typing.MatLike] | None = ..., +) -> tuple[ + bool, + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def imdecodemulti( + buf: UMat, + flags: int, + mats: typing.Sequence[cv2.typing.MatLike] | None = ..., +) -> tuple[ + bool, + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def imencode( + ext: str, + img: cv2.typing.MatLike, + params: typing.Sequence[int] = ..., +) -> tuple[ + bool, + numpy.ndarray[ + typing.Any, + numpy.dtype[numpy.uint8], + ], +]: ... + + +@typing.overload +def imencode( + ext: str, + img: UMat, + params: typing.Sequence[int] = ..., +) -> tuple[ + bool, + numpy.ndarray[ + typing.Any, + numpy.dtype[numpy.uint8], + ], +]: ... + + +def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ... + + +@typing.overload +def imreadmulti( + filename: str, + mats: typing.Sequence[cv2.typing.MatLike] | None = ..., + flags: int = ..., +) -> tuple[ + bool, + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def imreadmulti( + filename: str, + start: int, + count: int, + mats: typing.Sequence[cv2.typing.MatLike] | None = ..., + flags: int = ..., +) -> tuple[ + bool, + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def imshow(winname: str, mat: cv2.typing.MatLike) -> None: ... +@typing.overload +def imshow(winname: str, mat: cv2.cuda.GpuMat) -> None: ... +@typing.overload +def imshow(winname: str, mat: UMat) -> None: ... 
+ + +@typing.overload +def imwrite(filename: str, img: cv2.typing.MatLike, params: typing.Sequence[int] = ...) -> bool: ... +@typing.overload +def imwrite(filename: str, img: UMat, params: typing.Sequence[int] = ...) -> bool: ... + + +@typing.overload +def imwritemulti( + filename: str, + img: typing.Sequence[cv2.typing.MatLike], + params: typing.Sequence[int] = ..., +) -> bool: ... + + +@typing.overload +def imwritemulti(filename: str, img: typing.Sequence[UMat], params: typing.Sequence[int] = ...) -> bool: ... + + +@typing.overload +def inRange( + src: cv2.typing.MatLike, lowerb: cv2.typing.MatLike, upperb: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def inRange(src: UMat, lowerb: UMat, upperb: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def initCameraMatrix2D( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + aspectRatio: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def initCameraMatrix2D( + objectPoints: typing.Sequence[UMat], + imagePoints: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + aspectRatio: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def initInverseRectificationMap( + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + R: cv2.typing.MatLike, + newCameraMatrix: cv2.typing.MatLike, + size: cv2.typing.Size, + m1type: int, + map1: cv2.typing.MatLike | None = ..., + map2: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def initInverseRectificationMap( + cameraMatrix: UMat, + distCoeffs: UMat, + R: UMat, + newCameraMatrix: UMat, + size: cv2.typing.Size, + m1type: int, + map1: UMat | None = ..., + map2: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def initUndistortRectifyMap( + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + R: cv2.typing.MatLike, + newCameraMatrix: cv2.typing.MatLike, + size: cv2.typing.Size, + m1type: int, + map1: cv2.typing.MatLike | None = ..., + map2: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def initUndistortRectifyMap( + cameraMatrix: UMat, + distCoeffs: UMat, + R: UMat, + newCameraMatrix: UMat, + size: cv2.typing.Size, + m1type: int, + map1: UMat | None = ..., + map2: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def inpaint( + src: cv2.typing.MatLike, inpaintMask: cv2.typing.MatLike, inpaintRadius: float, + flags: int, dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def inpaint(src: UMat, inpaintMask: UMat, inpaintRadius: float, flags: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def insertChannel(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, coi: int) -> cv2.typing.MatLike: ... +@typing.overload +def insertChannel(src: UMat, dst: UMat, coi: int) -> UMat: ... + + +@typing.overload +def integral( + src: cv2.typing.MatLike, sum: cv2.typing.MatLike | + None = ..., sdepth: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def integral(src: UMat, sum: UMat | None = ..., sdepth: int = ...) -> UMat: ... 
+ + +@typing.overload +def integral2( + src: cv2.typing.MatLike, + sum: cv2.typing.MatLike | None = ..., + sqsum: cv2.typing.MatLike | None = ..., + sdepth: int = ..., + sqdepth: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def integral2( + src: UMat, + sum: UMat | None = ..., + sqsum: UMat | None = ..., + sdepth: int = ..., + sqdepth: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def integral3( + src: cv2.typing.MatLike, + sum: cv2.typing.MatLike | None = ..., + sqsum: cv2.typing.MatLike | None = ..., + tilted: cv2.typing.MatLike | None = ..., + sdepth: int = ..., + sqdepth: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def integral3( + src: UMat, + sum: UMat | None = ..., + sqsum: UMat | None = ..., + tilted: UMat | None = ..., + sdepth: int = ..., + sqdepth: int = ..., +) -> tuple[ + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def intersectConvexConvex( + p1: cv2.typing.MatLike, + p2: cv2.typing.MatLike, + p12: cv2.typing.MatLike | None = ..., + handleNested: bool = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def intersectConvexConvex( + p1: UMat, + p2: UMat, + p12: UMat | None = ..., + handleNested: bool = ..., +) -> tuple[ + float, + UMat, +]: ... + + +@typing.overload +def invert( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def invert(src: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[float, UMat]: ... + + +@typing.overload +def invertAffineTransform(M: cv2.typing.MatLike, iM: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def invertAffineTransform(M: UMat, iM: UMat | None = ...) -> UMat: ... + + +@typing.overload +def isContourConvex(contour: cv2.typing.MatLike) -> bool: ... +@typing.overload +def isContourConvex(contour: UMat) -> bool: ... + + +@typing.overload +def kmeans( + data: cv2.typing.MatLike, + K: int, + bestLabels: cv2.typing.MatLike, + criteria: cv2.typing.TermCriteria, + attempts: int, + flags: int, + centers: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def kmeans( + data: UMat, + K: int, + bestLabels: UMat, + criteria: cv2.typing.TermCriteria, + attempts: int, + flags: int, + centers: UMat | None = ..., +) -> tuple[ + float, + UMat, + UMat, +]: ... + + +@typing.overload +def line( + img: cv2.typing.MatLike, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def line( + img: UMat, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... + + +@typing.overload +def linearPolar( + src: cv2.typing.MatLike, center: cv2.typing.Point2f, maxRadius: float, + flags: int, dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def linearPolar( + src: UMat, + center: cv2.typing.Point2f, + maxRadius: float, + flags: int, + dst: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def log(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def log(src: UMat, dst: UMat | None = ...) -> UMat: ... 
+ + +@typing.overload +def logPolar( + src: cv2.typing.MatLike, center: cv2.typing.Point2f, M: float, flags: int, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def logPolar(src: UMat, center: cv2.typing.Point2f, M: float, flags: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def magnitude( + x: cv2.typing.MatLike, y: cv2.typing.MatLike, + magnitude: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def magnitude(x: UMat, y: UMat, magnitude: UMat | None = ...) -> UMat: ... + + +@typing.overload +def matMulDeriv( + A: cv2.typing.MatLike, + B: cv2.typing.MatLike, + dABdA: cv2.typing.MatLike | None = ..., + dABdB: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def matMulDeriv(A: UMat, B: UMat, dABdA: UMat | None = ..., dABdB: UMat | None = ...) -> tuple[UMat, UMat]: ... + + +@typing.overload +def matchShapes(contour1: cv2.typing.MatLike, contour2: cv2.typing.MatLike, method: int, parameter: float) -> float: ... +@typing.overload +def matchShapes(contour1: UMat, contour2: UMat, method: int, parameter: float) -> float: ... + + +@typing.overload +def matchTemplate( + image: cv2.typing.MatLike, + templ: cv2.typing.MatLike, + method: int, + result: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def matchTemplate( + image: UMat, + templ: UMat, + method: int, + result: UMat | None = ..., + mask: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def max( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def max(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def mean(src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.Scalar: ... +@typing.overload +def mean(src: UMat, mask: UMat | None = ...) -> cv2.typing.Scalar: ... + + +@typing.overload +def meanShift( + probImage: cv2.typing.MatLike, + window: cv2.typing.Rect, + criteria: cv2.typing.TermCriteria, +) -> tuple[ + int, + cv2.typing.Rect, +]: ... + + +@typing.overload +def meanShift( + probImage: UMat, + window: cv2.typing.Rect, + criteria: cv2.typing.TermCriteria, +) -> tuple[ + int, + cv2.typing.Rect, +]: ... + + +@typing.overload +def meanStdDev( + src: cv2.typing.MatLike, + mean: cv2.typing.MatLike | None = ..., + stddev: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def meanStdDev( + src: UMat, + mean: UMat | None = ..., + stddev: UMat | None = ..., + mask: UMat | None = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def medianBlur(src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def medianBlur(src: UMat, ksize: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def merge(mv: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def merge(mv: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def min( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def min(src1: UMat, src2: UMat, dst: UMat | None = ...) -> UMat: ... 
+ + +@typing.overload +def minAreaRect(points: cv2.typing.MatLike) -> cv2.typing.RotatedRect: ... +@typing.overload +def minAreaRect(points: UMat) -> cv2.typing.RotatedRect: ... + + +@typing.overload +def minEnclosingCircle(points: cv2.typing.MatLike) -> tuple[cv2.typing.Point2f, float]: ... +@typing.overload +def minEnclosingCircle(points: UMat) -> tuple[cv2.typing.Point2f, float]: ... + + +@typing.overload +def minEnclosingTriangle( + points: cv2.typing.MatLike, + triangle: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def minEnclosingTriangle(points: UMat, triangle: UMat | None = ...) -> tuple[float, UMat]: ... + + +@typing.overload +def minMaxLoc( + src: cv2.typing.MatLike, + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + float, + cv2.typing.Point, + cv2.typing.Point, +]: ... + + +@typing.overload +def minMaxLoc(src: UMat, mask: UMat | None = ...) -> tuple[float, float, cv2.typing.Point, cv2.typing.Point]: ... + + +@typing.overload +def mixChannels( + src: typing.Sequence[cv2.typing.MatLike], + dst: typing.Sequence[cv2.typing.MatLike], + fromTo: typing.Sequence[int], +) -> typing.Sequence[cv2.typing.MatLike]: ... + + +@typing.overload +def mixChannels( + src: typing.Sequence[UMat], dst: typing.Sequence[UMat], + fromTo: typing.Sequence[int], +) -> typing.Sequence[UMat]: ... + + +@typing.overload +def moments(array: cv2.typing.MatLike, binaryImage: bool = ...) -> cv2.typing.Moments: ... +@typing.overload +def moments(array: UMat, binaryImage: bool = ...) -> cv2.typing.Moments: ... + + +@typing.overload +def morphologyEx( + src: cv2.typing.MatLike, + op: int, + kernel: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def morphologyEx( + src: UMat, + op: int, + kernel: UMat, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + iterations: int = ..., + borderType: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +def moveWindow(winname: str, x: int, y: int) -> None: ... + + +@typing.overload +def mulSpectrums( + a: cv2.typing.MatLike, + b: cv2.typing.MatLike, + flags: int, + c: cv2.typing.MatLike | None = ..., + conjB: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def mulSpectrums(a: UMat, b: UMat, flags: int, c: UMat | None = ..., conjB: bool = ...) -> UMat: ... + + +@typing.overload +def mulTransposed( + src: cv2.typing.MatLike, + aTa: bool, + dst: cv2.typing.MatLike | None = ..., + delta: cv2.typing.MatLike | None = ..., + scale: float = ..., + dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def mulTransposed( + src: UMat, + aTa: bool, + dst: UMat | None = ..., + delta: UMat | None = ..., + scale: float = ..., + dtype: int = ..., +) -> UMat: ... + + +@typing.overload +def multiply( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | + None = ..., scale: float = ..., dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def multiply(src1: UMat, src2: UMat, dst: UMat | None = ..., scale: float = ..., dtype: int = ...) -> UMat: ... + + +def namedWindow(winname: str, flags: int = ...) -> None: ... + + +@typing.overload +def norm(src1: cv2.typing.MatLike, normType: int = ..., mask: cv2.typing.MatLike | None = ...) -> float: ... +@typing.overload +def norm(src1: UMat, normType: int = ..., mask: UMat | None = ...) 
-> float: ... + + +@typing.overload +def norm( + src1: cv2.typing.MatLike, + src2: cv2.typing.MatLike, + normType: int = ..., + mask: cv2.typing.MatLike | None = ..., +) -> float: ... + + +@typing.overload +def norm(src1: UMat, src2: UMat, normType: int = ..., mask: UMat | None = ...) -> float: ... + + +@typing.overload +def normalize( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alpha: float = ..., beta: float = ..., + norm_type: int = ..., dtype: int = ..., mask: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def normalize( + src: UMat, + dst: UMat, + alpha: float = ..., + beta: float = ..., + norm_type: int = ..., + dtype: int = ..., + mask: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def patchNaNs(a: cv2.typing.MatLike, val: float = ...) -> cv2.typing.MatLike: ... +@typing.overload +def patchNaNs(a: UMat, val: float = ...) -> UMat: ... + + +@typing.overload +def pencilSketch( + src: cv2.typing.MatLike, + dst1: cv2.typing.MatLike | None = ..., + dst2: cv2.typing.MatLike | None = ..., + sigma_s: float = ..., + sigma_r: float = ..., + shade_factor: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def pencilSketch( + src: UMat, + dst1: UMat | None = ..., + dst2: UMat | None = ..., + sigma_s: float = ..., + sigma_r: float = ..., + shade_factor: float = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def perspectiveTransform( + src: cv2.typing.MatLike, m: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def perspectiveTransform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def phase( + x: cv2.typing.MatLike, y: cv2.typing.MatLike, angle: cv2.typing.MatLike | + None = ..., angleInDegrees: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def phase(x: UMat, y: UMat, angle: UMat | None = ..., angleInDegrees: bool = ...) -> UMat: ... + + +@typing.overload +def phaseCorrelate( + src1: cv2.typing.MatLike, + src2: cv2.typing.MatLike, + window: cv2.typing.MatLike | None = ..., +) -> tuple[ + cv2.typing.Point2d, + float, +]: ... + + +@typing.overload +def phaseCorrelate(src1: UMat, src2: UMat, window: UMat | None = ...) -> tuple[cv2.typing.Point2d, float]: ... + + +@typing.overload +def pointPolygonTest(contour: cv2.typing.MatLike, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... +@typing.overload +def pointPolygonTest(contour: UMat, pt: cv2.typing.Point2f, measureDist: bool) -> float: ... + + +@typing.overload +def polarToCart( + magnitude: cv2.typing.MatLike, + angle: cv2.typing.MatLike, + x: cv2.typing.MatLike | None = ..., + y: cv2.typing.MatLike | None = ..., + angleInDegrees: bool = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def polarToCart( + magnitude: UMat, + angle: UMat, + x: UMat | None = ..., + y: UMat | None = ..., + angleInDegrees: bool = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +def pollKey() -> int: ... + + +@typing.overload +def polylines( + img: cv2.typing.MatLike, + pts: typing.Sequence[cv2.typing.MatLike], + isClosed: bool, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def polylines( + img: UMat, + pts: typing.Sequence[UMat], + isClosed: bool, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... 
+ + +@typing.overload +def pow(src: cv2.typing.MatLike, power: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def pow(src: UMat, power: float, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def preCornerDetect( + src: cv2.typing.MatLike, ksize: int, dst: cv2.typing.MatLike | + None = ..., borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def preCornerDetect(src: UMat, ksize: int, dst: UMat | None = ..., borderType: int = ...) -> UMat: ... + + +@typing.overload +def projectPoints( + objectPoints: cv2.typing.MatLike, + rvec: cv2.typing.MatLike, + tvec: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike | None = ..., + jacobian: cv2.typing.MatLike | None = ..., + aspectRatio: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def projectPoints( + objectPoints: UMat, + rvec: UMat, + tvec: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + imagePoints: UMat | None = ..., + jacobian: UMat | None = ..., + aspectRatio: float = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def putText( + img: cv2.typing.MatLike, + text: str, + org: cv2.typing.Point, + fontFace: int, + fontScale: float, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + bottomLeftOrigin: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def putText( + img: UMat, + text: str, + org: cv2.typing.Point, + fontFace: int, + fontScale: float, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + bottomLeftOrigin: bool = ..., +) -> UMat: ... + + +@typing.overload +def pyrDown( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + dstsize: cv2.typing.Size = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def pyrDown(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... + + +@typing.overload +def pyrMeanShiftFiltering( + src: cv2.typing.MatLike, + sp: float, + sr: float, + dst: cv2.typing.MatLike | None = ..., + maxLevel: int = ..., + termcrit: cv2.typing.TermCriteria = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def pyrMeanShiftFiltering( + src: UMat, + sp: float, + sr: float, + dst: UMat | None = ..., + maxLevel: int = ..., + termcrit: cv2.typing.TermCriteria = ..., +) -> UMat: ... + + +@typing.overload +def pyrUp( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + dstsize: cv2.typing.Size = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def pyrUp(src: UMat, dst: UMat | None = ..., dstsize: cv2.typing.Size = ..., borderType: int = ...) -> UMat: ... + + +@typing.overload +def randShuffle(dst: cv2.typing.MatLike, iterFactor: float = ...) -> cv2.typing.MatLike: ... +@typing.overload +def randShuffle(dst: UMat, iterFactor: float = ...) -> UMat: ... + + +@typing.overload +def randn(dst: cv2.typing.MatLike, mean: cv2.typing.MatLike, stddev: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@typing.overload +def randn(dst: UMat, mean: UMat, stddev: UMat) -> UMat: ... + + +@typing.overload +def randu(dst: cv2.typing.MatLike, low: cv2.typing.MatLike, high: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@typing.overload +def randu(dst: UMat, low: UMat, high: UMat) -> UMat: ... + + +def readOpticalFlow(path: str) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def recoverPose( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + E: cv2.typing.MatLike | None = ..., + R: cv2.typing.MatLike | None = ..., + t: cv2.typing.MatLike | None = ..., + method: int = ..., + prob: float = ..., + threshold: float = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def recoverPose( + points1: UMat, + points2: UMat, + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + E: UMat | None = ..., + R: UMat | None = ..., + t: UMat | None = ..., + method: int = ..., + prob: float = ..., + threshold: float = ..., + mask: UMat | None = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def recoverPose( + E: cv2.typing.MatLike, + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + R: cv2.typing.MatLike | None = ..., + t: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def recoverPose( + E: UMat, + points1: UMat, + points2: UMat, + cameraMatrix: UMat, + R: UMat | None = ..., + t: UMat | None = ..., + mask: UMat | None = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def recoverPose( + E: cv2.typing.MatLike, + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + R: cv2.typing.MatLike | None = ..., + t: cv2.typing.MatLike | None = ..., + focal: float = ..., + pp: cv2.typing.Point2d = ..., + mask: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def recoverPose( + E: UMat, + points1: UMat, + points2: UMat, + R: UMat | None = ..., + t: UMat | None = ..., + focal: float = ..., + pp: cv2.typing.Point2d = ..., + mask: UMat | None = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def recoverPose( + E: cv2.typing.MatLike, + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distanceThresh: float, + R: cv2.typing.MatLike | None = ..., + t: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., + triangulatedPoints: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def recoverPose( + E: UMat, + points1: UMat, + points2: UMat, + cameraMatrix: UMat, + distanceThresh: float, + R: UMat | None = ..., + t: UMat | None = ..., + mask: UMat | None = ..., + triangulatedPoints: UMat | None = ..., +) -> tuple[ + int, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def rectangle( + img: cv2.typing.MatLike, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def rectangle( + img: UMat, + pt1: cv2.typing.Point, + pt2: cv2.typing.Point, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... 
+ + +@typing.overload +def rectangle( + img: cv2.typing.MatLike, + rec: cv2.typing.Rect, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def rectangle( + img: UMat, + rec: cv2.typing.Rect, + color: cv2.typing.Scalar, + thickness: int = ..., + lineType: int = ..., + shift: int = ..., +) -> UMat: ... + + +def rectangleIntersectionArea(a: cv2.typing.Rect2d, b: cv2.typing.Rect2d) -> float: ... + + +@typing.overload +def rectify3Collinear( + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + cameraMatrix3: cv2.typing.MatLike, + distCoeffs3: cv2.typing.MatLike, + imgpt1: typing.Sequence[cv2.typing.MatLike], + imgpt3: typing.Sequence[cv2.typing.MatLike], + imageSize: cv2.typing.Size, + R12: cv2.typing.MatLike, + T12: cv2.typing.MatLike, + R13: cv2.typing.MatLike, + T13: cv2.typing.MatLike, + alpha: float, + newImgSize: cv2.typing.Size, + flags: int, + R1: cv2.typing.MatLike | None = ..., + R2: cv2.typing.MatLike | None = ..., + R3: cv2.typing.MatLike | None = ..., + P1: cv2.typing.MatLike | None = ..., + P2: cv2.typing.MatLike | None = ..., + P3: cv2.typing.MatLike | None = ..., + Q: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.Rect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def rectify3Collinear( + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + cameraMatrix3: UMat, + distCoeffs3: UMat, + imgpt1: typing.Sequence[UMat], + imgpt3: typing.Sequence[UMat], + imageSize: cv2.typing.Size, + R12: UMat, + T12: UMat, + R13: UMat, + T13: UMat, + alpha: float, + newImgSize: cv2.typing.Size, + flags: int, + R1: UMat | None = ..., + R2: UMat | None = ..., + R3: UMat | None = ..., + P1: UMat | None = ..., + P2: UMat | None = ..., + P3: UMat | None = ..., + Q: UMat | None = ..., +) -> tuple[ + float, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + cv2.typing.Rect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def reduce( + src: cv2.typing.MatLike, + dim: int, + rtype: int, + dst: cv2.typing.MatLike | None = ..., + dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def reduce(src: UMat, dim: int, rtype: int, dst: UMat | None = ..., dtype: int = ...) -> UMat: ... + + +@typing.overload +def reduceArgMax( + src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | + None = ..., lastIndex: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def reduceArgMax(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... + + +@typing.overload +def reduceArgMin( + src: cv2.typing.MatLike, axis: int, dst: cv2.typing.MatLike | + None = ..., lastIndex: bool = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def reduceArgMin(src: UMat, axis: int, dst: UMat | None = ..., lastIndex: bool = ...) -> UMat: ... + + +@typing.overload +def remap( + src: cv2.typing.MatLike, + map1: cv2.typing.MatLike, + map2: cv2.typing.MatLike, + interpolation: int, + dst: cv2.typing.MatLike | None = ..., + borderMode: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def remap( + src: UMat, map1: UMat, map2: UMat, interpolation: int, dst: UMat | None = ..., + borderMode: int = ..., borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +@typing.overload +def repeat(src: cv2.typing.MatLike, ny: int, nx: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def repeat(src: UMat, ny: int, nx: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def reprojectImageTo3D( + disparity: cv2.typing.MatLike, + Q: cv2.typing.MatLike, + _3dImage: cv2.typing.MatLike | None = ..., + handleMissingValues: bool = ..., + ddepth: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def reprojectImageTo3D( + disparity: UMat, + Q: UMat, + _3dImage: UMat | None = ..., + handleMissingValues: bool = ..., + ddepth: int = ..., +) -> UMat: ... + + +@typing.overload +def resize( + src: cv2.typing.MatLike, dsize: cv2.typing.Size | None, dst: cv2.typing.MatLike | None = ..., + fx: float = ..., fy: float = ..., interpolation: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def resize( + src: UMat, + dsize: cv2.typing.Size | None, + dst: UMat | None = ..., + fx: float = ..., + fy: float = ..., + interpolation: int = ..., +) -> UMat: ... + + +@typing.overload +def resizeWindow(winname: str, width: int, height: int) -> None: ... +@typing.overload +def resizeWindow(winname: str, size: cv2.typing.Size) -> None: ... + + +@typing.overload +def rotate(src: cv2.typing.MatLike, rotateCode: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def rotate(src: UMat, rotateCode: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def rotatedRectangleIntersection( + rect1: cv2.typing.RotatedRect, + rect2: cv2.typing.RotatedRect, + intersectingRegion: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def rotatedRectangleIntersection( + rect1: cv2.typing.RotatedRect, + rect2: cv2.typing.RotatedRect, + intersectingRegion: UMat | None = ..., +) -> tuple[ + int, + UMat, +]: ... + + +@typing.overload +def sampsonDistance(pt1: cv2.typing.MatLike, pt2: cv2.typing.MatLike, F: cv2.typing.MatLike) -> float: ... +@typing.overload +def sampsonDistance(pt1: UMat, pt2: UMat, F: UMat) -> float: ... + + +@typing.overload +def scaleAdd( + src1: cv2.typing.MatLike, alpha: float, src2: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def scaleAdd(src1: UMat, alpha: float, src2: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def seamlessClone( + src: cv2.typing.MatLike, + dst: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + p: cv2.typing.Point, + flags: int, + blend: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def seamlessClone( + src: UMat, dst: UMat, mask: UMat, p: cv2.typing.Point, + flags: int, blend: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def selectROI( + windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., + fromCenter: bool = ..., printNotice: bool = ..., +) -> cv2.typing.Rect: ... + + +@typing.overload +def selectROI( + windowName: str, + img: UMat, + showCrosshair: bool = ..., + fromCenter: bool = ..., + printNotice: bool = ..., +) -> cv2.typing.Rect: ... + + +@typing.overload +def selectROI( + img: cv2.typing.MatLike, + showCrosshair: bool = ..., + fromCenter: bool = ..., + printNotice: bool = ..., +) -> cv2.typing.Rect: ... 
+ + +@typing.overload +def selectROI( + img: UMat, + showCrosshair: bool = ..., + fromCenter: bool = ..., + printNotice: bool = ..., +) -> cv2.typing.Rect: ... + + +@typing.overload +def selectROIs( + windowName: str, img: cv2.typing.MatLike, showCrosshair: bool = ..., + fromCenter: bool = ..., printNotice: bool = ..., +) -> typing.Sequence[cv2.typing.Rect]: ... + + +@typing.overload +def selectROIs( + windowName: str, img: UMat, showCrosshair: bool = ..., fromCenter: bool = ..., + printNotice: bool = ..., +) -> typing.Sequence[cv2.typing.Rect]: ... + + +@typing.overload +def sepFilter2D( + src: cv2.typing.MatLike, + ddepth: int, + kernelX: cv2.typing.MatLike, + kernelY: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + delta: float = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def sepFilter2D( + src: UMat, + ddepth: int, + kernelX: UMat, + kernelY: UMat, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + delta: float = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def setIdentity(mtx: cv2.typing.MatLike, s: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ... +@typing.overload +def setIdentity(mtx: UMat, s: cv2.typing.Scalar = ...) -> UMat: ... + + +def setLogLevel(level: int) -> int: ... + + +def setNumThreads(nthreads: int) -> None: ... + + +def setRNGSeed(seed: int) -> None: ... + + +def setTrackbarMax(trackbarname: str, winname: str, maxval: int) -> None: ... + + +def setTrackbarMin(trackbarname: str, winname: str, minval: int) -> None: ... + + +def setTrackbarPos(trackbarname: str, winname: str, pos: int) -> None: ... + + +def setUseOpenVX(flag: bool) -> None: ... + + +def setUseOptimized(onoff: bool) -> None: ... + + +def setWindowProperty(winname: str, prop_id: int, prop_value: float) -> None: ... + + +def setWindowTitle(winname: str, title: str) -> None: ... + + +@typing.overload +def solve( + src1: cv2.typing.MatLike, + src2: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solve(src1: UMat, src2: UMat, dst: UMat | None = ..., flags: int = ...) -> tuple[bool, UMat]: ... + + +@typing.overload +def solveCubic( + coeffs: cv2.typing.MatLike, + roots: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solveCubic(coeffs: UMat, roots: UMat | None = ...) -> tuple[int, UMat]: ... + + +@typing.overload +def solveLP( + Func: cv2.typing.MatLike, + Constr: cv2.typing.MatLike, + z: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solveLP(Func: UMat, Constr: UMat, z: UMat | None = ...) -> tuple[int, UMat]: ... + + +@typing.overload +def solveP3P( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + flags: int, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., +) -> tuple[ + int, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], +]: ... + + +@typing.overload +def solveP3P( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + flags: int, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., +) -> tuple[ + int, + typing.Sequence[UMat], + typing.Sequence[UMat], +]: ... 
+ + +@typing.overload +def solvePnP( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike | None = ..., + tvec: cv2.typing.MatLike | None = ..., + useExtrinsicGuess: bool = ..., + flags: int = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnP( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat | None = ..., + tvec: UMat | None = ..., + useExtrinsicGuess: bool = ..., + flags: int = ..., +) -> tuple[ + bool, + UMat, + UMat, +]: ... + + +@typing.overload +def solvePnPGeneric( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + useExtrinsicGuess: bool = ..., + flags: SolvePnPMethod = ..., + rvec: cv2.typing.MatLike | None = ..., + tvec: cv2.typing.MatLike | None = ..., + reprojectionError: cv2.typing.MatLike | None = ..., +) -> tuple[ + int, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnPGeneric( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + useExtrinsicGuess: bool = ..., + flags: SolvePnPMethod = ..., + rvec: UMat | None = ..., + tvec: UMat | None = ..., + reprojectionError: UMat | None = ..., +) -> tuple[ + int, + typing.Sequence[UMat], + typing.Sequence[UMat], + UMat, +]: ... + + +@typing.overload +def solvePnPRansac( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike | None = ..., + tvec: cv2.typing.MatLike | None = ..., + useExtrinsicGuess: bool = ..., + iterationsCount: int = ..., + reprojectionError: float = ..., + confidence: float = ..., + inliers: cv2.typing.MatLike | None = ..., + flags: int = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnPRansac( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat | None = ..., + tvec: UMat | None = ..., + useExtrinsicGuess: bool = ..., + iterationsCount: int = ..., + reprojectionError: float = ..., + confidence: float = ..., + inliers: UMat | None = ..., + flags: int = ..., +) -> tuple[ + bool, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def solvePnPRansac( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike | None = ..., + tvec: cv2.typing.MatLike | None = ..., + inliers: cv2.typing.MatLike | None = ..., + params: UsacParams = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnPRansac( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat | None = ..., + tvec: UMat | None = ..., + inliers: UMat | None = ..., + params: UsacParams = ..., +) -> tuple[ + bool, + UMat, + UMat, + UMat, + UMat, +]: ... 
+ + +@typing.overload +def solvePnPRefineLM( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike, + tvec: cv2.typing.MatLike, + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnPRefineLM( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat, + tvec: UMat, + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def solvePnPRefineVVS( + objectPoints: cv2.typing.MatLike, + imagePoints: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + rvec: cv2.typing.MatLike, + tvec: cv2.typing.MatLike, + criteria: cv2.typing.TermCriteria = ..., + VVSlambda: float = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePnPRefineVVS( + objectPoints: UMat, + imagePoints: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + rvec: UMat, + tvec: UMat, + criteria: cv2.typing.TermCriteria = ..., + VVSlambda: float = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def solvePoly( + coeffs: cv2.typing.MatLike, + roots: cv2.typing.MatLike | None = ..., + maxIters: int = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def solvePoly(coeffs: UMat, roots: UMat | None = ..., maxIters: int = ...) -> tuple[float, UMat]: ... + + +@typing.overload +def sort(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def sort(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def sortIdx(src: cv2.typing.MatLike, flags: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def sortIdx(src: UMat, flags: int, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def spatialGradient( + src: cv2.typing.MatLike, + dx: cv2.typing.MatLike | None = ..., + dy: cv2.typing.MatLike | None = ..., + ksize: int = ..., + borderType: int = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def spatialGradient( + src: UMat, + dx: UMat | None = ..., + dy: UMat | None = ..., + ksize: int = ..., + borderType: int = ..., +) -> tuple[ + UMat, + UMat, +]: ... + + +@typing.overload +def split( + m: cv2.typing.MatLike, mv: typing.Sequence[cv2.typing.MatLike] + | None = ..., +) -> typing.Sequence[cv2.typing.MatLike]: ... + + +@typing.overload +def split(m: UMat, mv: typing.Sequence[UMat] | None = ...) -> typing.Sequence[UMat]: ... + + +@typing.overload +def sqrBoxFilter( + src: cv2.typing.MatLike, + ddepth: int, + ksize: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., + anchor: cv2.typing.Point = ..., + normalize: bool = ..., + borderType: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def sqrBoxFilter( + src: UMat, + ddepth: int, + ksize: cv2.typing.Size, + dst: UMat | None = ..., + anchor: cv2.typing.Point = ..., + normalize: bool = ..., + borderType: int = ..., +) -> UMat: ... + + +@typing.overload +def sqrt(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def sqrt(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def stackBlur( + src: cv2.typing.MatLike, ksize: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def stackBlur(src: UMat, ksize: cv2.typing.Size, dst: UMat | None = ...) -> UMat: ... + + +def startWindowThread() -> int: ... + + +@typing.overload +def stereoCalibrate( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints1: typing.Sequence[cv2.typing.MatLike], + imagePoints2: typing.Sequence[cv2.typing.MatLike], + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + R: cv2.typing.MatLike | None = ..., + T: cv2.typing.MatLike | None = ..., + E: cv2.typing.MatLike | None = ..., + F: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def stereoCalibrate( + objectPoints: typing.Sequence[UMat], + imagePoints1: typing.Sequence[UMat], + imagePoints2: typing.Sequence[UMat], + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + imageSize: cv2.typing.Size, + R: UMat | None = ..., + T: UMat | None = ..., + E: UMat | None = ..., + F: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... + + +@typing.overload +def stereoCalibrate( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints1: typing.Sequence[cv2.typing.MatLike], + imagePoints2: typing.Sequence[cv2.typing.MatLike], + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + R: cv2.typing.MatLike, + T: cv2.typing.MatLike, + E: cv2.typing.MatLike | None = ..., + F: cv2.typing.MatLike | None = ..., + perViewErrors: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def stereoCalibrate( + objectPoints: typing.Sequence[UMat], + imagePoints1: typing.Sequence[UMat], + imagePoints2: typing.Sequence[UMat], + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + imageSize: cv2.typing.Size, + R: UMat, + T: UMat, + E: UMat | None = ..., + F: UMat | None = ..., + perViewErrors: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, +]: ... 
+ + +@typing.overload +def stereoCalibrateExtended( + objectPoints: typing.Sequence[cv2.typing.MatLike], + imagePoints1: typing.Sequence[cv2.typing.MatLike], + imagePoints2: typing.Sequence[cv2.typing.MatLike], + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + R: cv2.typing.MatLike, + T: cv2.typing.MatLike, + E: cv2.typing.MatLike | None = ..., + F: cv2.typing.MatLike | None = ..., + rvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + tvecs: typing.Sequence[cv2.typing.MatLike] | None = ..., + perViewErrors: cv2.typing.MatLike | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + typing.Sequence[cv2.typing.MatLike], + typing.Sequence[cv2.typing.MatLike], + cv2.typing.MatLike, +]: ... + + +@typing.overload +def stereoCalibrateExtended( + objectPoints: typing.Sequence[UMat], + imagePoints1: typing.Sequence[UMat], + imagePoints2: typing.Sequence[UMat], + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + imageSize: cv2.typing.Size, + R: UMat, + T: UMat, + E: UMat | None = ..., + F: UMat | None = ..., + rvecs: typing.Sequence[UMat] | None = ..., + tvecs: typing.Sequence[UMat] | None = ..., + perViewErrors: UMat | None = ..., + flags: int = ..., + criteria: cv2.typing.TermCriteria = ..., +) -> tuple[ + float, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + UMat, + typing.Sequence[UMat], + typing.Sequence[UMat], + UMat, +]: ... + + +@typing.overload +def stereoRectify( + cameraMatrix1: cv2.typing.MatLike, + distCoeffs1: cv2.typing.MatLike, + cameraMatrix2: cv2.typing.MatLike, + distCoeffs2: cv2.typing.MatLike, + imageSize: cv2.typing.Size, + R: cv2.typing.MatLike, + T: cv2.typing.MatLike, + R1: cv2.typing.MatLike | None = ..., + R2: cv2.typing.MatLike | None = ..., + P1: cv2.typing.MatLike | None = ..., + P2: cv2.typing.MatLike | None = ..., + Q: cv2.typing.MatLike | None = ..., + flags: int = ..., + alpha: float = ..., + newImageSize: cv2.typing.Size = ..., +) -> tuple[ + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.MatLike, + cv2.typing.Rect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def stereoRectify( + cameraMatrix1: UMat, + distCoeffs1: UMat, + cameraMatrix2: UMat, + distCoeffs2: UMat, + imageSize: cv2.typing.Size, + R: UMat, + T: UMat, + R1: UMat | None = ..., + R2: UMat | None = ..., + P1: UMat | None = ..., + P2: UMat | None = ..., + Q: UMat | None = ..., + flags: int = ..., + alpha: float = ..., + newImageSize: cv2.typing.Size = ..., +) -> tuple[ + UMat, + UMat, + UMat, + UMat, + UMat, + cv2.typing.Rect, + cv2.typing.Rect, +]: ... + + +@typing.overload +def stereoRectifyUncalibrated( + points1: cv2.typing.MatLike, + points2: cv2.typing.MatLike, + F: cv2.typing.MatLike, + imgSize: cv2.typing.Size, + H1: cv2.typing.MatLike | None = ..., + H2: cv2.typing.MatLike | None = ..., + threshold: float = ..., +) -> tuple[ + bool, + cv2.typing.MatLike, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def stereoRectifyUncalibrated( + points1: UMat, + points2: UMat, + F: UMat, + imgSize: cv2.typing.Size, + H1: UMat | None = ..., + H2: UMat | None = ..., + threshold: float = ..., +) -> tuple[ + bool, + UMat, + UMat, +]: ... 
+ + +@typing.overload +def stylization( + src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + sigma_s: float = ..., sigma_r: float = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def stylization(src: UMat, dst: UMat | None = ..., sigma_s: float = ..., sigma_r: float = ...) -> UMat: ... + + +@typing.overload +def subtract( + src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., + mask: cv2.typing.MatLike | None = ..., dtype: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def subtract(src1: UMat, src2: UMat, dst: UMat | None = ..., mask: UMat | None = ..., dtype: int = ...) -> UMat: ... + + +@typing.overload +def sumElems(src: cv2.typing.MatLike) -> cv2.typing.Scalar: ... +@typing.overload +def sumElems(src: UMat) -> cv2.typing.Scalar: ... + + +@typing.overload +def textureFlattening( + src: cv2.typing.MatLike, + mask: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + low_threshold: float = ..., + high_threshold: float = ..., + kernel_size: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def textureFlattening( + src: UMat, + mask: UMat, + dst: UMat | None = ..., + low_threshold: float = ..., + high_threshold: float = ..., + kernel_size: int = ..., +) -> UMat: ... + + +@typing.overload +def threshold( + src: cv2.typing.MatLike, + thresh: float, + maxval: float, + type: int, + dst: cv2.typing.MatLike | None = ..., +) -> tuple[ + float, + cv2.typing.MatLike, +]: ... + + +@typing.overload +def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ... + + +@typing.overload +def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ... +@typing.overload +def trace(mtx: UMat) -> cv2.typing.Scalar: ... + + +@typing.overload +def transform( + src: cv2.typing.MatLike, m: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def transform(src: UMat, m: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def transpose(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def transpose(src: UMat, dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def transposeND( + src: cv2.typing.MatLike, + order: typing.Sequence[int], + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def transposeND(src: UMat, order: typing.Sequence[int], dst: UMat | None = ...) -> UMat: ... + + +@typing.overload +def triangulatePoints( + projMatr1: cv2.typing.MatLike, + projMatr2: cv2.typing.MatLike, + projPoints1: cv2.typing.MatLike, + projPoints2: cv2.typing.MatLike, + points4D: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def triangulatePoints( + projMatr1: UMat, + projMatr2: UMat, + projPoints1: UMat, + projPoints2: UMat, + points4D: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def undistort( + src: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + newCameraMatrix: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def undistort( + src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | + None = ..., newCameraMatrix: UMat | None = ..., +) -> UMat: ... 
+ + +@typing.overload +def undistortImagePoints( + src: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + arg1: cv2.typing.TermCriteria = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def undistortImagePoints( + src: UMat, cameraMatrix: UMat, distCoeffs: UMat, dst: UMat | + None = ..., arg1: cv2.typing.TermCriteria = ..., +) -> UMat: ... + + +@typing.overload +def undistortPoints( + src: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + dst: cv2.typing.MatLike | None = ..., + R: cv2.typing.MatLike | None = ..., + P: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def undistortPoints( + src: UMat, + cameraMatrix: UMat, + distCoeffs: UMat, + dst: UMat | None = ..., + R: UMat | None = ..., + P: UMat | None = ..., +) -> UMat: ... + + +@typing.overload +def undistortPointsIter( + src: cv2.typing.MatLike, + cameraMatrix: cv2.typing.MatLike, + distCoeffs: cv2.typing.MatLike, + R: cv2.typing.MatLike, + P: cv2.typing.MatLike, + criteria: cv2.typing.TermCriteria, + dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def undistortPointsIter( + src: UMat, cameraMatrix: UMat, distCoeffs: UMat, R: UMat, P: UMat, + criteria: cv2.typing.TermCriteria, dst: UMat | None = ..., +) -> UMat: ... + + +def useOpenVX() -> bool: ... + + +def useOptimized() -> bool: ... + + +@typing.overload +def validateDisparity( + disparity: cv2.typing.MatLike, cost: cv2.typing.MatLike, minDisparity: int, + numberOfDisparities: int, disp12MaxDisp: int = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def validateDisparity( + disparity: UMat, + cost: UMat, + minDisparity: int, + numberOfDisparities: int, + disp12MaxDisp: int = ..., +) -> UMat: ... + + +@typing.overload +def vconcat(src: typing.Sequence[cv2.typing.MatLike], dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ... +@typing.overload +def vconcat(src: typing.Sequence[UMat], dst: UMat | None = ...) -> UMat: ... + + +def waitKey(delay: int = ...) -> int: ... + + +def waitKeyEx(delay: int = ...) -> int: ... + + +@typing.overload +def warpAffine( + src: cv2.typing.MatLike, + M: cv2.typing.MatLike, + dsize: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., + flags: int = ..., + borderMode: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def warpAffine( + src: UMat, + M: UMat, + dsize: cv2.typing.Size, + dst: UMat | None = ..., + flags: int = ..., + borderMode: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +@typing.overload +def warpPerspective( + src: cv2.typing.MatLike, + M: cv2.typing.MatLike, + dsize: cv2.typing.Size, + dst: cv2.typing.MatLike | None = ..., + flags: int = ..., + borderMode: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> cv2.typing.MatLike: ... + + +@typing.overload +def warpPerspective( + src: UMat, + M: UMat, + dsize: cv2.typing.Size, + dst: UMat | None = ..., + flags: int = ..., + borderMode: int = ..., + borderValue: cv2.typing.Scalar = ..., +) -> UMat: ... + + +@typing.overload +def warpPolar( + src: cv2.typing.MatLike, dsize: cv2.typing.Size, center: cv2.typing.Point2f, + maxRadius: float, flags: int, dst: cv2.typing.MatLike | None = ..., +) -> cv2.typing.MatLike: ... 
+ + +@typing.overload +def warpPolar( + src: UMat, dsize: cv2.typing.Size, center: cv2.typing.Point2f, + maxRadius: float, flags: int, dst: UMat | None = ..., +) -> UMat: ... -def bootstrap() -> None: ... +@typing.overload +def watershed(image: cv2.typing.MatLike, markers: cv2.typing.MatLike) -> cv2.typing.MatLike: ... +@typing.overload +def watershed(image: UMat, markers: UMat) -> UMat: ... -Mat: TypeAlias = WrappedMat | _NDArray -# TODO: Make Mat generic with int or float -_MatF: TypeAlias = WrappedMat | _NDArray +@typing.overload +def writeOpticalFlow(path: str, flow: cv2.typing.MatLike) -> bool: ... +@typing.overload +def writeOpticalFlow(path: str, flow: UMat) -> bool: ... diff --git a/typings/cv2/cv2.pyi b/typings/cv2/cv2.pyi deleted file mode 100644 index e46c09fe..00000000 --- a/typings/cv2/cv2.pyi +++ /dev/null @@ -1,5247 +0,0 @@ -from collections.abc import Sequence -from typing import ClassVar, Union, overload - -from _typeshed import Incomplete -from cv2 import Mat, _MatF -from cv2.gapi.streaming import queue_capacity -from typing_extensions import TypeAlias - -# Y047 & Y018 (Unused TypeAlias and TypeVar): Helper types reused everywhere. -# The noqa comments won't be necessary when types in this module are more complete and use the aliases - -# Function argument types -# Convertable to boolean -_Boolean: TypeAlias = bool | int | None -# "a scalar" -_NumericScalar: TypeAlias = float | bool | None -# cv::Scalar -_Scalar: TypeAlias = Mat | _NumericScalar | Sequence[_NumericScalar] -# cv::TermCriteria -_TermCriteria: TypeAlias = Union[tuple[int, int, float], Sequence[float]] -# cv::Point -_Point: TypeAlias = Union[tuple[int, int], Sequence[int]] -# cv::Size -_Size: TypeAlias = Union[tuple[int, int], Sequence[int]] -# cv::Range -_Range: TypeAlias = Union[tuple[int, int], Sequence[int]] -# cv::Point -_PointFloat: TypeAlias = Union[tuple[float, float], Sequence[float]] -# cv::Size -_SizeFloat: TypeAlias = Union[tuple[float, float], Sequence[float]] -# cv::Rect -_Rect: TypeAlias = Union[tuple[int, int, int, int], Sequence[int]] -# cv::Rect -_RectFloat: TypeAlias = Union[tuple[int, int, int, int], Sequence[int]] -# cv::RotatedRect -_RotatedRect: TypeAlias = Union[tuple[_PointFloat, _SizeFloat, float], Sequence[_PointFloat | _SizeFloat | float]] -_RotatedRectResult: TypeAlias = tuple[tuple[float, float], tuple[float, float], float] -# cv:UMat, cv::InputArray, cv::OutputArray and cv::InputOutputArray -_UMat: TypeAlias = UMat | _MatF | _NumericScalar - -# TODO: Complete types until all the aliases below are gone! -# These are temporary placeholder return types, as were in the docstrings signatures from microsoft/python-type-stubs -# This is often (but not always) a sign that a TypeVar should be used to return the same type as a param. 
-# retval is equivalent to Unknown -_flow: TypeAlias = Incomplete -_image: TypeAlias = Incomplete -_edgeList: TypeAlias = Incomplete -_leadingEdgeList: TypeAlias = Incomplete -_triangleList: TypeAlias = Incomplete -_matches_info: TypeAlias = Incomplete -_arg3: TypeAlias = Incomplete -_outputBlobs: TypeAlias = Incomplete -_layersTypes: TypeAlias = Incomplete -_detections: TypeAlias = Incomplete -_results: TypeAlias = Incomplete -_corners: TypeAlias = Incomplete -_pts: TypeAlias = Incomplete -_dst: TypeAlias = Incomplete -_markers: TypeAlias = Incomplete -_masks: TypeAlias = Incomplete -_window: TypeAlias = Incomplete -_edges: TypeAlias = Incomplete -_lowerBound: TypeAlias = Incomplete -_circles: TypeAlias = Incomplete -_lines: TypeAlias = Incomplete -_hu: TypeAlias = Incomplete -_points2f: TypeAlias = Incomplete -_keypoints: TypeAlias = Incomplete -_mean: TypeAlias = Incomplete -_eigenvectors: TypeAlias = Incomplete -_eigenvalues: TypeAlias = Incomplete -_result: TypeAlias = Incomplete -_mtxR: TypeAlias = Incomplete -_mtxQ: TypeAlias = Incomplete -_Qx: TypeAlias = Incomplete -_Qy: TypeAlias = Incomplete -_Qz: TypeAlias = Incomplete -_jacobian: TypeAlias = Incomplete -_w: TypeAlias = Incomplete -_u: TypeAlias = Incomplete -_vt: TypeAlias = Incomplete -_approxCurve: TypeAlias = Incomplete -_img: TypeAlias = Incomplete -_dist: TypeAlias = Incomplete -_nidx: TypeAlias = Incomplete -_points: TypeAlias = Incomplete -_pyramid: TypeAlias = Incomplete -_covar: TypeAlias = Incomplete -_nextPts: TypeAlias = Incomplete -_status: TypeAlias = Incomplete -_err: TypeAlias = Incomplete -_cameraMatrix: TypeAlias = Incomplete -_distCoeffs: TypeAlias = Incomplete -_rvecs: TypeAlias = Incomplete -_tvecs: TypeAlias = Incomplete -_stdDeviationsIntrinsics: TypeAlias = Incomplete -_stdDeviationsExtrinsics: TypeAlias = Incomplete -_perViewErrors: TypeAlias = Incomplete -_newObjPoints: TypeAlias = Incomplete -_stdDeviationsObjPoints: TypeAlias = Incomplete -_R_cam2gripper: TypeAlias = Incomplete -_t_cam2gripper: TypeAlias = Incomplete -_fovx: TypeAlias = Incomplete -_fovy: TypeAlias = Incomplete -_focalLength: TypeAlias = Incomplete -_principalPoint: TypeAlias = Incomplete -_aspectRatio: TypeAlias = Incomplete -_magnitude: TypeAlias = Incomplete -_angle: TypeAlias = Incomplete -_pt1: TypeAlias = Incomplete -_pt2: TypeAlias = Incomplete -_pos: TypeAlias = Incomplete -_m: TypeAlias = Incomplete -_rvec3: TypeAlias = Incomplete -_tvec3: TypeAlias = Incomplete -_dr3dr1: TypeAlias = Incomplete -_dr3dt1: TypeAlias = Incomplete -_dr3dr2: TypeAlias = Incomplete -_dr3dt2: TypeAlias = Incomplete -_dt3dr1: TypeAlias = Incomplete -_dt3dt1: TypeAlias = Incomplete -_dt3dr2: TypeAlias = Incomplete -_dt3dt2: TypeAlias = Incomplete -_labels: TypeAlias = Incomplete -_stats: TypeAlias = Incomplete -_centroids: TypeAlias = Incomplete -_dstmap1: TypeAlias = Incomplete -_dstmap2: TypeAlias = Incomplete -_hull: TypeAlias = Incomplete -_convexityDefects: TypeAlias = Incomplete -_newPoints1: TypeAlias = Incomplete -_newPoints2: TypeAlias = Incomplete -_grayscale: TypeAlias = Incomplete -_color_boost: TypeAlias = Incomplete -_R1: TypeAlias = Incomplete -_R2: TypeAlias = Incomplete -_t: TypeAlias = Incomplete -_rotations: TypeAlias = Incomplete -_translations: TypeAlias = Incomplete -_normals: TypeAlias = Incomplete -_rotMatrix: TypeAlias = Incomplete -_transVect: TypeAlias = Incomplete -_rotMatrixX: TypeAlias = Incomplete -_rotMatrixY: TypeAlias = Incomplete -_rotMatrixZ: TypeAlias = Incomplete -_eulerAngles: TypeAlias = Incomplete -_outImage: 
TypeAlias = Incomplete -_outImg: TypeAlias = Incomplete -_inliers: TypeAlias = Incomplete -_out: TypeAlias = Incomplete -_sharpness: TypeAlias = Incomplete -_possibleSolutions: TypeAlias = Incomplete -_buf: TypeAlias = Incomplete -_meta: TypeAlias = Incomplete -_centers: TypeAlias = Incomplete -_contours: TypeAlias = Incomplete -_hierarchy: TypeAlias = Incomplete -_mask: TypeAlias = Incomplete -_idx: TypeAlias = Incomplete -_warpMatrix: TypeAlias = Incomplete -_line: TypeAlias = Incomplete -_rect: TypeAlias = Incomplete -_kx: TypeAlias = Incomplete -_ky: TypeAlias = Incomplete -_validPixROI: TypeAlias = Incomplete -_patch: TypeAlias = Incomplete -_baseLine: TypeAlias = Incomplete -_bgdModel: TypeAlias = Incomplete -_fgdModel: TypeAlias = Incomplete -_rectList: TypeAlias = Incomplete -_weights: TypeAlias = Incomplete -_mats: TypeAlias = Incomplete -_map1: TypeAlias = Incomplete -_map2: TypeAlias = Incomplete -_sum: TypeAlias = Incomplete -_sqsum: TypeAlias = Incomplete -_tilted: TypeAlias = Incomplete -_p12: TypeAlias = Incomplete -_iM: TypeAlias = Incomplete -_bestLabels: TypeAlias = Incomplete -_dABdA: TypeAlias = Incomplete -_dABdB: TypeAlias = Incomplete -_stddev: TypeAlias = Incomplete -_center: TypeAlias = Incomplete -_radius: TypeAlias = Incomplete -_triangle: TypeAlias = Incomplete -_c: TypeAlias = Incomplete -_a: TypeAlias = Incomplete -_dst1: TypeAlias = Incomplete -_dst2: TypeAlias = Incomplete -_response: TypeAlias = Incomplete -_x: TypeAlias = Incomplete -_y: TypeAlias = Incomplete -_imagePoints: TypeAlias = Incomplete -_R: TypeAlias = Incomplete -_R3: TypeAlias = Incomplete -_P1: TypeAlias = Incomplete -_P2: TypeAlias = Incomplete -_P3: TypeAlias = Incomplete -_Q: TypeAlias = Incomplete -_roi1: TypeAlias = Incomplete -_roi2: TypeAlias = Incomplete -_3dImage: TypeAlias = Incomplete -_intersectingRegion: TypeAlias = Incomplete -_blend: TypeAlias = Incomplete -_boundingBoxes: TypeAlias = Incomplete -_mtx: TypeAlias = Incomplete -_roots: TypeAlias = Incomplete -_z: TypeAlias = Incomplete -_rvec: TypeAlias = Incomplete -_tvec: TypeAlias = Incomplete -_reprojectionError: TypeAlias = Incomplete -_dx: TypeAlias = Incomplete -_dy: TypeAlias = Incomplete -_mv: TypeAlias = Incomplete -_cameraMatrix1: TypeAlias = Incomplete -_distCoeffs1: TypeAlias = Incomplete -_cameraMatrix2: TypeAlias = Incomplete -_distCoeffs2: TypeAlias = Incomplete -_T: TypeAlias = Incomplete -_E: TypeAlias = Incomplete -_F: TypeAlias = Incomplete -_validPixROI1: TypeAlias = Incomplete -_validPixROI2: TypeAlias = Incomplete -_H1: TypeAlias = Incomplete -_H2: TypeAlias = Incomplete -_points4D: TypeAlias = Incomplete -_disparity: TypeAlias = Incomplete -_triangulatedPoints: TypeAlias = Incomplete - -__version__: str - -ACCESS_FAST: int -ACCESS_MASK: int -ACCESS_READ: int -ACCESS_RW: int -ACCESS_WRITE: int -ADAPTIVE_THRESH_GAUSSIAN_C: int -ADAPTIVE_THRESH_MEAN_C: int -AGAST_FEATURE_DETECTOR_AGAST_5_8: int -AGAST_FEATURE_DETECTOR_AGAST_7_12D: int -AGAST_FEATURE_DETECTOR_AGAST_7_12S: int -AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int -AGAST_FEATURE_DETECTOR_OAST_9_16: int -AGAST_FEATURE_DETECTOR_THRESHOLD: int -AKAZE_DESCRIPTOR_KAZE: int -AKAZE_DESCRIPTOR_KAZE_UPRIGHT: int -AKAZE_DESCRIPTOR_MLDB: int -AKAZE_DESCRIPTOR_MLDB_UPRIGHT: int -AgastFeatureDetector_AGAST_5_8: int -AgastFeatureDetector_AGAST_7_12d: int -AgastFeatureDetector_AGAST_7_12s: int -AgastFeatureDetector_NONMAX_SUPPRESSION: int -AgastFeatureDetector_OAST_9_16: int -AgastFeatureDetector_THRESHOLD: int -BORDER_CONSTANT: int -BORDER_DEFAULT: int 
-BORDER_ISOLATED: int -BORDER_REFLECT: int -BORDER_REFLECT101: int -BORDER_REFLECT_101: int -BORDER_REPLICATE: int -BORDER_TRANSPARENT: int -BORDER_WRAP: int -CALIB_CB_ACCURACY: int -CALIB_CB_ADAPTIVE_THRESH: int -CALIB_CB_ASYMMETRIC_GRID: int -CALIB_CB_CLUSTERING: int -CALIB_CB_EXHAUSTIVE: int -CALIB_CB_FAST_CHECK: int -CALIB_CB_FILTER_QUADS: int -CALIB_CB_LARGER: int -CALIB_CB_MARKER: int -CALIB_CB_NORMALIZE_IMAGE: int -CALIB_CB_SYMMETRIC_GRID: int -CALIB_FIX_ASPECT_RATIO: int -CALIB_FIX_FOCAL_LENGTH: int -CALIB_FIX_INTRINSIC: int -CALIB_FIX_K1: int -CALIB_FIX_K2: int -CALIB_FIX_K3: int -CALIB_FIX_K4: int -CALIB_FIX_K5: int -CALIB_FIX_K6: int -CALIB_FIX_PRINCIPAL_POINT: int -CALIB_FIX_S1_S2_S3_S4: int -CALIB_FIX_TANGENT_DIST: int -CALIB_FIX_TAUX_TAUY: int -CALIB_HAND_EYE_ANDREFF: int -CALIB_HAND_EYE_DANIILIDIS: int -CALIB_HAND_EYE_HORAUD: int -CALIB_HAND_EYE_PARK: int -CALIB_HAND_EYE_TSAI: int -CALIB_NINTRINSIC: int -CALIB_RATIONAL_MODEL: int -CALIB_ROBOT_WORLD_HAND_EYE_LI: int -CALIB_ROBOT_WORLD_HAND_EYE_SHAH: int -CALIB_SAME_FOCAL_LENGTH: int -CALIB_THIN_PRISM_MODEL: int -CALIB_TILTED_MODEL: int -CALIB_USE_EXTRINSIC_GUESS: int -CALIB_USE_INTRINSIC_GUESS: int -CALIB_USE_LU: int -CALIB_USE_QR: int -CALIB_ZERO_DISPARITY: int -CALIB_ZERO_TANGENT_DIST: int -CAP_ANDROID: int -CAP_ANY: int -CAP_ARAVIS: int -CAP_AVFOUNDATION: int -CAP_CMU1394: int -CAP_DC1394: int -CAP_DSHOW: int -CAP_FFMPEG: int -CAP_FIREWARE: int -CAP_FIREWIRE: int -CAP_GIGANETIX: int -CAP_GPHOTO2: int -CAP_GSTREAMER: int -CAP_IEEE1394: int -CAP_IMAGES: int -CAP_INTELPERC: int -CAP_INTELPERC_DEPTH_GENERATOR: int -CAP_INTELPERC_DEPTH_MAP: int -CAP_INTELPERC_GENERATORS_MASK: int -CAP_INTELPERC_IMAGE: int -CAP_INTELPERC_IMAGE_GENERATOR: int -CAP_INTELPERC_IR_GENERATOR: int -CAP_INTELPERC_IR_MAP: int -CAP_INTELPERC_UVDEPTH_MAP: int -CAP_INTEL_MFX: int -CAP_MSMF: int -CAP_OPENCV_MJPEG: int -CAP_OPENNI: int -CAP_OPENNI2: int -CAP_OPENNI2_ASTRA: int -CAP_OPENNI2_ASUS: int -CAP_OPENNI_ASUS: int -CAP_OPENNI_BGR_IMAGE: int -CAP_OPENNI_DEPTH_GENERATOR: int -CAP_OPENNI_DEPTH_GENERATOR_BASELINE: int -CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH: int -CAP_OPENNI_DEPTH_GENERATOR_PRESENT: int -CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION: int -CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON: int -CAP_OPENNI_DEPTH_MAP: int -CAP_OPENNI_DISPARITY_MAP: int -CAP_OPENNI_DISPARITY_MAP_32F: int -CAP_OPENNI_GENERATORS_MASK: int -CAP_OPENNI_GRAY_IMAGE: int -CAP_OPENNI_IMAGE_GENERATOR: int -CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE: int -CAP_OPENNI_IMAGE_GENERATOR_PRESENT: int -CAP_OPENNI_IR_GENERATOR: int -CAP_OPENNI_IR_GENERATOR_PRESENT: int -CAP_OPENNI_IR_IMAGE: int -CAP_OPENNI_POINT_CLOUD_MAP: int -CAP_OPENNI_QVGA_30HZ: int -CAP_OPENNI_QVGA_60HZ: int -CAP_OPENNI_SXGA_15HZ: int -CAP_OPENNI_SXGA_30HZ: int -CAP_OPENNI_VALID_DEPTH_MASK: int -CAP_OPENNI_VGA_30HZ: int -CAP_PROP_APERTURE: int -CAP_PROP_ARAVIS_AUTOTRIGGER: int -CAP_PROP_AUDIO_BASE_INDEX: int -CAP_PROP_AUDIO_DATA_DEPTH: int -CAP_PROP_AUDIO_POS: int -CAP_PROP_AUDIO_SAMPLES_PER_SECOND: int -CAP_PROP_AUDIO_SHIFT_NSEC: int -CAP_PROP_AUDIO_STREAM: int -CAP_PROP_AUDIO_SYNCHRONIZE: int -CAP_PROP_AUDIO_TOTAL_CHANNELS: int -CAP_PROP_AUDIO_TOTAL_STREAMS: int -CAP_PROP_AUTOFOCUS: int -CAP_PROP_AUTO_EXPOSURE: int -CAP_PROP_AUTO_WB: int -CAP_PROP_BACKEND: int -CAP_PROP_BACKLIGHT: int -CAP_PROP_BITRATE: int -CAP_PROP_BRIGHTNESS: int -CAP_PROP_BUFFERSIZE: int -CAP_PROP_CHANNEL: int -CAP_PROP_CODEC_EXTRADATA_INDEX: int -CAP_PROP_CODEC_PIXEL_FORMAT: int -CAP_PROP_CONTRAST: int -CAP_PROP_CONVERT_RGB: int 
-CAP_PROP_DC1394_MAX: int -CAP_PROP_DC1394_MODE_AUTO: int -CAP_PROP_DC1394_MODE_MANUAL: int -CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO: int -CAP_PROP_DC1394_OFF: int -CAP_PROP_EXPOSURE: int -CAP_PROP_EXPOSUREPROGRAM: int -CAP_PROP_FOCUS: int -CAP_PROP_FORMAT: int -CAP_PROP_FOURCC: int -CAP_PROP_FPS: int -CAP_PROP_FRAME_COUNT: int -CAP_PROP_FRAME_HEIGHT: int -CAP_PROP_FRAME_WIDTH: int -CAP_PROP_GAIN: int -CAP_PROP_GAMMA: int -CAP_PROP_GIGA_FRAME_HEIGH_MAX: int -CAP_PROP_GIGA_FRAME_OFFSET_X: int -CAP_PROP_GIGA_FRAME_OFFSET_Y: int -CAP_PROP_GIGA_FRAME_SENS_HEIGH: int -CAP_PROP_GIGA_FRAME_SENS_WIDTH: int -CAP_PROP_GIGA_FRAME_WIDTH_MAX: int -CAP_PROP_GPHOTO2_COLLECT_MSGS: int -CAP_PROP_GPHOTO2_FLUSH_MSGS: int -CAP_PROP_GPHOTO2_PREVIEW: int -CAP_PROP_GPHOTO2_RELOAD_CONFIG: int -CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE: int -CAP_PROP_GPHOTO2_WIDGET_ENUMERATE: int -CAP_PROP_GSTREAMER_QUEUE_LENGTH: int -CAP_PROP_GUID: int -CAP_PROP_HUE: int -CAP_PROP_HW_ACCELERATION: int -CAP_PROP_HW_ACCELERATION_USE_OPENCL: int -CAP_PROP_HW_DEVICE: int -CAP_PROP_IMAGES_BASE: int -CAP_PROP_IMAGES_LAST: int -CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD: int -CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ: int -CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT: int -CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE: int -CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE: int -CAP_PROP_INTELPERC_PROFILE_COUNT: int -CAP_PROP_INTELPERC_PROFILE_IDX: int -CAP_PROP_IOS_DEVICE_EXPOSURE: int -CAP_PROP_IOS_DEVICE_FLASH: int -CAP_PROP_IOS_DEVICE_FOCUS: int -CAP_PROP_IOS_DEVICE_TORCH: int -CAP_PROP_IOS_DEVICE_WHITEBALANCE: int -CAP_PROP_IRIS: int -CAP_PROP_ISO_SPEED: int -CAP_PROP_LRF_HAS_KEY_FRAME: int -CAP_PROP_MODE: int -CAP_PROP_MONOCHROME: int -CAP_PROP_OPENNI2_MIRROR: int -CAP_PROP_OPENNI2_SYNC: int -CAP_PROP_OPENNI_APPROX_FRAME_SYNC: int -CAP_PROP_OPENNI_BASELINE: int -CAP_PROP_OPENNI_CIRCLE_BUFFER: int -CAP_PROP_OPENNI_FOCAL_LENGTH: int -CAP_PROP_OPENNI_FRAME_MAX_DEPTH: int -CAP_PROP_OPENNI_GENERATOR_PRESENT: int -CAP_PROP_OPENNI_MAX_BUFFER_SIZE: int -CAP_PROP_OPENNI_MAX_TIME_DURATION: int -CAP_PROP_OPENNI_OUTPUT_MODE: int -CAP_PROP_OPENNI_REGISTRATION: int -CAP_PROP_OPENNI_REGISTRATION_ON: int -CAP_PROP_OPEN_TIMEOUT_MSEC: int -CAP_PROP_ORIENTATION_AUTO: int -CAP_PROP_ORIENTATION_META: int -CAP_PROP_PAN: int -CAP_PROP_POS_AVI_RATIO: int -CAP_PROP_POS_FRAMES: int -CAP_PROP_POS_MSEC: int -CAP_PROP_PVAPI_BINNINGX: int -CAP_PROP_PVAPI_BINNINGY: int -CAP_PROP_PVAPI_DECIMATIONHORIZONTAL: int -CAP_PROP_PVAPI_DECIMATIONVERTICAL: int -CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE: int -CAP_PROP_PVAPI_MULTICASTIP: int -CAP_PROP_PVAPI_PIXELFORMAT: int -CAP_PROP_READ_TIMEOUT_MSEC: int -CAP_PROP_RECTIFICATION: int -CAP_PROP_ROLL: int -CAP_PROP_SAR_DEN: int -CAP_PROP_SAR_NUM: int -CAP_PROP_SATURATION: int -CAP_PROP_SETTINGS: int -CAP_PROP_SHARPNESS: int -CAP_PROP_SPEED: int -CAP_PROP_STREAM_OPEN_TIME_USEC: int -CAP_PROP_TEMPERATURE: int -CAP_PROP_TILT: int -CAP_PROP_TRIGGER: int -CAP_PROP_TRIGGER_DELAY: int -CAP_PROP_VIDEO_STREAM: int -CAP_PROP_VIDEO_TOTAL_CHANNELS: int -CAP_PROP_VIEWFINDER: int -CAP_PROP_WB_TEMPERATURE: int -CAP_PROP_WHITE_BALANCE_BLUE_U: int -CAP_PROP_WHITE_BALANCE_RED_V: int -CAP_PROP_XI_ACQ_BUFFER_SIZE: int -CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT: int -CAP_PROP_XI_ACQ_FRAME_BURST_COUNT: int -CAP_PROP_XI_ACQ_TIMING_MODE: int -CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT: int -CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE: int -CAP_PROP_XI_AEAG: int -CAP_PROP_XI_AEAG_LEVEL: int -CAP_PROP_XI_AEAG_ROI_HEIGHT: int -CAP_PROP_XI_AEAG_ROI_OFFSET_X: int -CAP_PROP_XI_AEAG_ROI_OFFSET_Y: 
int -CAP_PROP_XI_AEAG_ROI_WIDTH: int -CAP_PROP_XI_AE_MAX_LIMIT: int -CAP_PROP_XI_AG_MAX_LIMIT: int -CAP_PROP_XI_APPLY_CMS: int -CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION: int -CAP_PROP_XI_AUTO_WB: int -CAP_PROP_XI_AVAILABLE_BANDWIDTH: int -CAP_PROP_XI_BINNING_HORIZONTAL: int -CAP_PROP_XI_BINNING_PATTERN: int -CAP_PROP_XI_BINNING_SELECTOR: int -CAP_PROP_XI_BINNING_VERTICAL: int -CAP_PROP_XI_BPC: int -CAP_PROP_XI_BUFFERS_QUEUE_SIZE: int -CAP_PROP_XI_BUFFER_POLICY: int -CAP_PROP_XI_CC_MATRIX_00: int -CAP_PROP_XI_CC_MATRIX_01: int -CAP_PROP_XI_CC_MATRIX_02: int -CAP_PROP_XI_CC_MATRIX_03: int -CAP_PROP_XI_CC_MATRIX_10: int -CAP_PROP_XI_CC_MATRIX_11: int -CAP_PROP_XI_CC_MATRIX_12: int -CAP_PROP_XI_CC_MATRIX_13: int -CAP_PROP_XI_CC_MATRIX_20: int -CAP_PROP_XI_CC_MATRIX_21: int -CAP_PROP_XI_CC_MATRIX_22: int -CAP_PROP_XI_CC_MATRIX_23: int -CAP_PROP_XI_CC_MATRIX_30: int -CAP_PROP_XI_CC_MATRIX_31: int -CAP_PROP_XI_CC_MATRIX_32: int -CAP_PROP_XI_CC_MATRIX_33: int -CAP_PROP_XI_CHIP_TEMP: int -CAP_PROP_XI_CMS: int -CAP_PROP_XI_COLOR_FILTER_ARRAY: int -CAP_PROP_XI_COLUMN_FPN_CORRECTION: int -CAP_PROP_XI_COOLING: int -CAP_PROP_XI_COUNTER_SELECTOR: int -CAP_PROP_XI_COUNTER_VALUE: int -CAP_PROP_XI_DATA_FORMAT: int -CAP_PROP_XI_DEBOUNCE_EN: int -CAP_PROP_XI_DEBOUNCE_POL: int -CAP_PROP_XI_DEBOUNCE_T0: int -CAP_PROP_XI_DEBOUNCE_T1: int -CAP_PROP_XI_DEBUG_LEVEL: int -CAP_PROP_XI_DECIMATION_HORIZONTAL: int -CAP_PROP_XI_DECIMATION_PATTERN: int -CAP_PROP_XI_DECIMATION_SELECTOR: int -CAP_PROP_XI_DECIMATION_VERTICAL: int -CAP_PROP_XI_DEFAULT_CC_MATRIX: int -CAP_PROP_XI_DEVICE_MODEL_ID: int -CAP_PROP_XI_DEVICE_RESET: int -CAP_PROP_XI_DEVICE_SN: int -CAP_PROP_XI_DOWNSAMPLING: int -CAP_PROP_XI_DOWNSAMPLING_TYPE: int -CAP_PROP_XI_EXPOSURE: int -CAP_PROP_XI_EXPOSURE_BURST_COUNT: int -CAP_PROP_XI_EXP_PRIORITY: int -CAP_PROP_XI_FFS_ACCESS_KEY: int -CAP_PROP_XI_FFS_FILE_ID: int -CAP_PROP_XI_FFS_FILE_SIZE: int -CAP_PROP_XI_FRAMERATE: int -CAP_PROP_XI_FREE_FFS_SIZE: int -CAP_PROP_XI_GAIN: int -CAP_PROP_XI_GAIN_SELECTOR: int -CAP_PROP_XI_GAMMAC: int -CAP_PROP_XI_GAMMAY: int -CAP_PROP_XI_GPI_LEVEL: int -CAP_PROP_XI_GPI_MODE: int -CAP_PROP_XI_GPI_SELECTOR: int -CAP_PROP_XI_GPO_MODE: int -CAP_PROP_XI_GPO_SELECTOR: int -CAP_PROP_XI_HDR: int -CAP_PROP_XI_HDR_KNEEPOINT_COUNT: int -CAP_PROP_XI_HDR_T1: int -CAP_PROP_XI_HDR_T2: int -CAP_PROP_XI_HEIGHT: int -CAP_PROP_XI_HOUS_BACK_SIDE_TEMP: int -CAP_PROP_XI_HOUS_TEMP: int -CAP_PROP_XI_HW_REVISION: int -CAP_PROP_XI_IMAGE_BLACK_LEVEL: int -CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH: int -CAP_PROP_XI_IMAGE_DATA_FORMAT: int -CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA: int -CAP_PROP_XI_IMAGE_IS_COLOR: int -CAP_PROP_XI_IMAGE_PAYLOAD_SIZE: int -CAP_PROP_XI_IS_COOLED: int -CAP_PROP_XI_IS_DEVICE_EXIST: int -CAP_PROP_XI_KNEEPOINT1: int -CAP_PROP_XI_KNEEPOINT2: int -CAP_PROP_XI_LED_MODE: int -CAP_PROP_XI_LED_SELECTOR: int -CAP_PROP_XI_LENS_APERTURE_VALUE: int -CAP_PROP_XI_LENS_FEATURE: int -CAP_PROP_XI_LENS_FEATURE_SELECTOR: int -CAP_PROP_XI_LENS_FOCAL_LENGTH: int -CAP_PROP_XI_LENS_FOCUS_DISTANCE: int -CAP_PROP_XI_LENS_FOCUS_MOVE: int -CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE: int -CAP_PROP_XI_LENS_MODE: int -CAP_PROP_XI_LIMIT_BANDWIDTH: int -CAP_PROP_XI_LUT_EN: int -CAP_PROP_XI_LUT_INDEX: int -CAP_PROP_XI_LUT_VALUE: int -CAP_PROP_XI_MANUAL_WB: int -CAP_PROP_XI_OFFSET_X: int -CAP_PROP_XI_OFFSET_Y: int -CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH: int -CAP_PROP_XI_OUTPUT_DATA_PACKING: int -CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE: int -CAP_PROP_XI_RECENT_FRAME: int -CAP_PROP_XI_REGION_MODE: int -CAP_PROP_XI_REGION_SELECTOR: int 
-CAP_PROP_XI_ROW_FPN_CORRECTION: int -CAP_PROP_XI_SENSOR_BOARD_TEMP: int -CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ: int -CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX: int -CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH: int -CAP_PROP_XI_SENSOR_FEATURE_SELECTOR: int -CAP_PROP_XI_SENSOR_FEATURE_VALUE: int -CAP_PROP_XI_SENSOR_MODE: int -CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT: int -CAP_PROP_XI_SENSOR_TAPS: int -CAP_PROP_XI_SHARPNESS: int -CAP_PROP_XI_SHUTTER_TYPE: int -CAP_PROP_XI_TARGET_TEMP: int -CAP_PROP_XI_TEST_PATTERN: int -CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR: int -CAP_PROP_XI_TIMEOUT: int -CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT: int -CAP_PROP_XI_TRG_DELAY: int -CAP_PROP_XI_TRG_SELECTOR: int -CAP_PROP_XI_TRG_SOFTWARE: int -CAP_PROP_XI_TRG_SOURCE: int -CAP_PROP_XI_TS_RST_MODE: int -CAP_PROP_XI_TS_RST_SOURCE: int -CAP_PROP_XI_USED_FFS_SIZE: int -CAP_PROP_XI_WB_KB: int -CAP_PROP_XI_WB_KG: int -CAP_PROP_XI_WB_KR: int -CAP_PROP_XI_WIDTH: int -CAP_PROP_ZOOM: int -CAP_PVAPI: int -CAP_PVAPI_DECIMATION_2OUTOF16: int -CAP_PVAPI_DECIMATION_2OUTOF4: int -CAP_PVAPI_DECIMATION_2OUTOF8: int -CAP_PVAPI_DECIMATION_OFF: int -CAP_PVAPI_FSTRIGMODE_FIXEDRATE: int -CAP_PVAPI_FSTRIGMODE_FREERUN: int -CAP_PVAPI_FSTRIGMODE_SOFTWARE: int -CAP_PVAPI_FSTRIGMODE_SYNCIN1: int -CAP_PVAPI_FSTRIGMODE_SYNCIN2: int -CAP_PVAPI_PIXELFORMAT_BAYER16: int -CAP_PVAPI_PIXELFORMAT_BAYER8: int -CAP_PVAPI_PIXELFORMAT_BGR24: int -CAP_PVAPI_PIXELFORMAT_BGRA32: int -CAP_PVAPI_PIXELFORMAT_MONO16: int -CAP_PVAPI_PIXELFORMAT_MONO8: int -CAP_PVAPI_PIXELFORMAT_RGB24: int -CAP_PVAPI_PIXELFORMAT_RGBA32: int -CAP_QT: int -CAP_REALSENSE: int -CAP_UEYE: int -CAP_UNICAP: int -CAP_V4L: int -CAP_V4L2: int -CAP_VFW: int -CAP_WINRT: int -CAP_XIAPI: int -CAP_XINE: int -CASCADE_DO_CANNY_PRUNING: int -CASCADE_DO_ROUGH_SEARCH: int -CASCADE_FIND_BIGGEST_OBJECT: int -CASCADE_SCALE_IMAGE: int -CCL_BBDT: int -CCL_BOLELLI: int -CCL_DEFAULT: int -CCL_GRANA: int -CCL_SAUF: int -CCL_SPAGHETTI: int -CCL_WU: int -CC_STAT_AREA: int -CC_STAT_HEIGHT: int -CC_STAT_LEFT: int -CC_STAT_MAX: int -CC_STAT_TOP: int -CC_STAT_WIDTH: int -CHAIN_APPROX_NONE: int -CHAIN_APPROX_SIMPLE: int -CHAIN_APPROX_TC89_KCOS: int -CHAIN_APPROX_TC89_L1: int -CIRCLES_GRID_FINDER_PARAMETERS_ASYMMETRIC_GRID: int -CIRCLES_GRID_FINDER_PARAMETERS_SYMMETRIC_GRID: int -CMP_EQ: int -CMP_GE: int -CMP_GT: int -CMP_LE: int -CMP_LT: int -CMP_NE: int -COLORMAP_AUTUMN: int -COLORMAP_BONE: int -COLORMAP_CIVIDIS: int -COLORMAP_COOL: int -COLORMAP_DEEPGREEN: int -COLORMAP_HOT: int -COLORMAP_HSV: int -COLORMAP_INFERNO: int -COLORMAP_JET: int -COLORMAP_MAGMA: int -COLORMAP_OCEAN: int -COLORMAP_PARULA: int -COLORMAP_PINK: int -COLORMAP_PLASMA: int -COLORMAP_RAINBOW: int -COLORMAP_SPRING: int -COLORMAP_SUMMER: int -COLORMAP_TURBO: int -COLORMAP_TWILIGHT: int -COLORMAP_TWILIGHT_SHIFTED: int -COLORMAP_VIRIDIS: int -COLORMAP_WINTER: int -COLOR_BAYER_BG2BGR: int -COLOR_BAYER_BG2BGRA: int -COLOR_BAYER_BG2BGR_EA: int -COLOR_BAYER_BG2BGR_VNG: int -COLOR_BAYER_BG2GRAY: int -COLOR_BAYER_BG2RGB: int -COLOR_BAYER_BG2RGBA: int -COLOR_BAYER_BG2RGB_EA: int -COLOR_BAYER_BG2RGB_VNG: int -COLOR_BAYER_BGGR2BGR: int -COLOR_BAYER_BGGR2BGRA: int -COLOR_BAYER_BGGR2BGR_EA: int -COLOR_BAYER_BGGR2BGR_VNG: int -COLOR_BAYER_BGGR2GRAY: int -COLOR_BAYER_BGGR2RGB: int -COLOR_BAYER_BGGR2RGBA: int -COLOR_BAYER_BGGR2RGB_EA: int -COLOR_BAYER_BGGR2RGB_VNG: int -COLOR_BAYER_GB2BGR: int -COLOR_BAYER_GB2BGRA: int -COLOR_BAYER_GB2BGR_EA: int -COLOR_BAYER_GB2BGR_VNG: int -COLOR_BAYER_GB2GRAY: int -COLOR_BAYER_GB2RGB: int -COLOR_BAYER_GB2RGBA: int -COLOR_BAYER_GB2RGB_EA: int 
-COLOR_BAYER_GB2RGB_VNG: int -COLOR_BAYER_GBRG2BGR: int -COLOR_BAYER_GBRG2BGRA: int -COLOR_BAYER_GBRG2BGR_EA: int -COLOR_BAYER_GBRG2BGR_VNG: int -COLOR_BAYER_GBRG2GRAY: int -COLOR_BAYER_GBRG2RGB: int -COLOR_BAYER_GBRG2RGBA: int -COLOR_BAYER_GBRG2RGB_EA: int -COLOR_BAYER_GBRG2RGB_VNG: int -COLOR_BAYER_GR2BGR: int -COLOR_BAYER_GR2BGRA: int -COLOR_BAYER_GR2BGR_EA: int -COLOR_BAYER_GR2BGR_VNG: int -COLOR_BAYER_GR2GRAY: int -COLOR_BAYER_GR2RGB: int -COLOR_BAYER_GR2RGBA: int -COLOR_BAYER_GR2RGB_EA: int -COLOR_BAYER_GR2RGB_VNG: int -COLOR_BAYER_GRBG2BGR: int -COLOR_BAYER_GRBG2BGRA: int -COLOR_BAYER_GRBG2BGR_EA: int -COLOR_BAYER_GRBG2BGR_VNG: int -COLOR_BAYER_GRBG2GRAY: int -COLOR_BAYER_GRBG2RGB: int -COLOR_BAYER_GRBG2RGBA: int -COLOR_BAYER_GRBG2RGB_EA: int -COLOR_BAYER_GRBG2RGB_VNG: int -COLOR_BAYER_RG2BGR: int -COLOR_BAYER_RG2BGRA: int -COLOR_BAYER_RG2BGR_EA: int -COLOR_BAYER_RG2BGR_VNG: int -COLOR_BAYER_RG2GRAY: int -COLOR_BAYER_RG2RGB: int -COLOR_BAYER_RG2RGBA: int -COLOR_BAYER_RG2RGB_EA: int -COLOR_BAYER_RG2RGB_VNG: int -COLOR_BAYER_RGGB2BGR: int -COLOR_BAYER_RGGB2BGRA: int -COLOR_BAYER_RGGB2BGR_EA: int -COLOR_BAYER_RGGB2BGR_VNG: int -COLOR_BAYER_RGGB2GRAY: int -COLOR_BAYER_RGGB2RGB: int -COLOR_BAYER_RGGB2RGBA: int -COLOR_BAYER_RGGB2RGB_EA: int -COLOR_BAYER_RGGB2RGB_VNG: int -COLOR_BGR2BGR555: int -COLOR_BGR2BGR565: int -COLOR_BGR2BGRA: int -COLOR_BGR2GRAY: int -COLOR_BGR2HLS: int -COLOR_BGR2HLS_FULL: int -COLOR_BGR2HSV: int -COLOR_BGR2HSV_FULL: int -COLOR_BGR2LAB: int -COLOR_BGR2LUV: int -COLOR_BGR2Lab: int -COLOR_BGR2Luv: int -COLOR_BGR2RGB: int -COLOR_BGR2RGBA: int -COLOR_BGR2XYZ: int -COLOR_BGR2YCR_CB: int -COLOR_BGR2YCrCb: int -COLOR_BGR2YUV: int -COLOR_BGR2YUV_I420: int -COLOR_BGR2YUV_IYUV: int -COLOR_BGR2YUV_YV12: int -COLOR_BGR5552BGR: int -COLOR_BGR5552BGRA: int -COLOR_BGR5552GRAY: int -COLOR_BGR5552RGB: int -COLOR_BGR5552RGBA: int -COLOR_BGR5652BGR: int -COLOR_BGR5652BGRA: int -COLOR_BGR5652GRAY: int -COLOR_BGR5652RGB: int -COLOR_BGR5652RGBA: int -COLOR_BGRA2BGR: int -COLOR_BGRA2BGR555: int -COLOR_BGRA2BGR565: int -COLOR_BGRA2GRAY: int -COLOR_BGRA2RGB: int -COLOR_BGRA2RGBA: int -COLOR_BGRA2YUV_I420: int -COLOR_BGRA2YUV_IYUV: int -COLOR_BGRA2YUV_YV12: int -COLOR_BayerBG2BGR: int -COLOR_BayerBG2BGRA: int -COLOR_BayerBG2BGR_EA: int -COLOR_BayerBG2BGR_VNG: int -COLOR_BayerBG2GRAY: int -COLOR_BayerBG2RGB: int -COLOR_BayerBG2RGBA: int -COLOR_BayerBG2RGB_EA: int -COLOR_BayerBG2RGB_VNG: int -COLOR_BayerBGGR2BGR: int -COLOR_BayerBGGR2BGRA: int -COLOR_BayerBGGR2BGR_EA: int -COLOR_BayerBGGR2BGR_VNG: int -COLOR_BayerBGGR2GRAY: int -COLOR_BayerBGGR2RGB: int -COLOR_BayerBGGR2RGBA: int -COLOR_BayerBGGR2RGB_EA: int -COLOR_BayerBGGR2RGB_VNG: int -COLOR_BayerGB2BGR: int -COLOR_BayerGB2BGRA: int -COLOR_BayerGB2BGR_EA: int -COLOR_BayerGB2BGR_VNG: int -COLOR_BayerGB2GRAY: int -COLOR_BayerGB2RGB: int -COLOR_BayerGB2RGBA: int -COLOR_BayerGB2RGB_EA: int -COLOR_BayerGB2RGB_VNG: int -COLOR_BayerGBRG2BGR: int -COLOR_BayerGBRG2BGRA: int -COLOR_BayerGBRG2BGR_EA: int -COLOR_BayerGBRG2BGR_VNG: int -COLOR_BayerGBRG2GRAY: int -COLOR_BayerGBRG2RGB: int -COLOR_BayerGBRG2RGBA: int -COLOR_BayerGBRG2RGB_EA: int -COLOR_BayerGBRG2RGB_VNG: int -COLOR_BayerGR2BGR: int -COLOR_BayerGR2BGRA: int -COLOR_BayerGR2BGR_EA: int -COLOR_BayerGR2BGR_VNG: int -COLOR_BayerGR2GRAY: int -COLOR_BayerGR2RGB: int -COLOR_BayerGR2RGBA: int -COLOR_BayerGR2RGB_EA: int -COLOR_BayerGR2RGB_VNG: int -COLOR_BayerGRBG2BGR: int -COLOR_BayerGRBG2BGRA: int -COLOR_BayerGRBG2BGR_EA: int -COLOR_BayerGRBG2BGR_VNG: int -COLOR_BayerGRBG2GRAY: int 
-COLOR_BayerGRBG2RGB: int -COLOR_BayerGRBG2RGBA: int -COLOR_BayerGRBG2RGB_EA: int -COLOR_BayerGRBG2RGB_VNG: int -COLOR_BayerRG2BGR: int -COLOR_BayerRG2BGRA: int -COLOR_BayerRG2BGR_EA: int -COLOR_BayerRG2BGR_VNG: int -COLOR_BayerRG2GRAY: int -COLOR_BayerRG2RGB: int -COLOR_BayerRG2RGBA: int -COLOR_BayerRG2RGB_EA: int -COLOR_BayerRG2RGB_VNG: int -COLOR_BayerRGGB2BGR: int -COLOR_BayerRGGB2BGRA: int -COLOR_BayerRGGB2BGR_EA: int -COLOR_BayerRGGB2BGR_VNG: int -COLOR_BayerRGGB2GRAY: int -COLOR_BayerRGGB2RGB: int -COLOR_BayerRGGB2RGBA: int -COLOR_BayerRGGB2RGB_EA: int -COLOR_BayerRGGB2RGB_VNG: int -COLOR_COLORCVT_MAX: int -COLOR_GRAY2BGR: int -COLOR_GRAY2BGR555: int -COLOR_GRAY2BGR565: int -COLOR_GRAY2BGRA: int -COLOR_GRAY2RGB: int -COLOR_GRAY2RGBA: int -COLOR_HLS2BGR: int -COLOR_HLS2BGR_FULL: int -COLOR_HLS2RGB: int -COLOR_HLS2RGB_FULL: int -COLOR_HSV2BGR: int -COLOR_HSV2BGR_FULL: int -COLOR_HSV2RGB: int -COLOR_HSV2RGB_FULL: int -COLOR_LAB2BGR: int -COLOR_LAB2LBGR: int -COLOR_LAB2LRGB: int -COLOR_LAB2RGB: int -COLOR_LBGR2LAB: int -COLOR_LBGR2LUV: int -COLOR_LBGR2Lab: int -COLOR_LBGR2Luv: int -COLOR_LRGB2LAB: int -COLOR_LRGB2LUV: int -COLOR_LRGB2Lab: int -COLOR_LRGB2Luv: int -COLOR_LUV2BGR: int -COLOR_LUV2LBGR: int -COLOR_LUV2LRGB: int -COLOR_LUV2RGB: int -COLOR_Lab2BGR: int -COLOR_Lab2LBGR: int -COLOR_Lab2LRGB: int -COLOR_Lab2RGB: int -COLOR_Luv2BGR: int -COLOR_Luv2LBGR: int -COLOR_Luv2LRGB: int -COLOR_Luv2RGB: int -COLOR_M_RGBA2RGBA: int -COLOR_RGB2BGR: int -COLOR_RGB2BGR555: int -COLOR_RGB2BGR565: int -COLOR_RGB2BGRA: int -COLOR_RGB2GRAY: int -COLOR_RGB2HLS: int -COLOR_RGB2HLS_FULL: int -COLOR_RGB2HSV: int -COLOR_RGB2HSV_FULL: int -COLOR_RGB2LAB: int -COLOR_RGB2LUV: int -COLOR_RGB2Lab: int -COLOR_RGB2Luv: int -COLOR_RGB2RGBA: int -COLOR_RGB2XYZ: int -COLOR_RGB2YCR_CB: int -COLOR_RGB2YCrCb: int -COLOR_RGB2YUV: int -COLOR_RGB2YUV_I420: int -COLOR_RGB2YUV_IYUV: int -COLOR_RGB2YUV_YV12: int -COLOR_RGBA2BGR: int -COLOR_RGBA2BGR555: int -COLOR_RGBA2BGR565: int -COLOR_RGBA2BGRA: int -COLOR_RGBA2GRAY: int -COLOR_RGBA2M_RGBA: int -COLOR_RGBA2RGB: int -COLOR_RGBA2YUV_I420: int -COLOR_RGBA2YUV_IYUV: int -COLOR_RGBA2YUV_YV12: int -COLOR_RGBA2mRGBA: int -COLOR_XYZ2BGR: int -COLOR_XYZ2RGB: int -COLOR_YCR_CB2BGR: int -COLOR_YCR_CB2RGB: int -COLOR_YCrCb2BGR: int -COLOR_YCrCb2RGB: int -COLOR_YUV2BGR: int -COLOR_YUV2BGRA_I420: int -COLOR_YUV2BGRA_IYUV: int -COLOR_YUV2BGRA_NV12: int -COLOR_YUV2BGRA_NV21: int -COLOR_YUV2BGRA_UYNV: int -COLOR_YUV2BGRA_UYVY: int -COLOR_YUV2BGRA_Y422: int -COLOR_YUV2BGRA_YUNV: int -COLOR_YUV2BGRA_YUY2: int -COLOR_YUV2BGRA_YUYV: int -COLOR_YUV2BGRA_YV12: int -COLOR_YUV2BGRA_YVYU: int -COLOR_YUV2BGR_I420: int -COLOR_YUV2BGR_IYUV: int -COLOR_YUV2BGR_NV12: int -COLOR_YUV2BGR_NV21: int -COLOR_YUV2BGR_UYNV: int -COLOR_YUV2BGR_UYVY: int -COLOR_YUV2BGR_Y422: int -COLOR_YUV2BGR_YUNV: int -COLOR_YUV2BGR_YUY2: int -COLOR_YUV2BGR_YUYV: int -COLOR_YUV2BGR_YV12: int -COLOR_YUV2BGR_YVYU: int -COLOR_YUV2GRAY_420: int -COLOR_YUV2GRAY_I420: int -COLOR_YUV2GRAY_IYUV: int -COLOR_YUV2GRAY_NV12: int -COLOR_YUV2GRAY_NV21: int -COLOR_YUV2GRAY_UYNV: int -COLOR_YUV2GRAY_UYVY: int -COLOR_YUV2GRAY_Y422: int -COLOR_YUV2GRAY_YUNV: int -COLOR_YUV2GRAY_YUY2: int -COLOR_YUV2GRAY_YUYV: int -COLOR_YUV2GRAY_YV12: int -COLOR_YUV2GRAY_YVYU: int -COLOR_YUV2RGB: int -COLOR_YUV2RGBA_I420: int -COLOR_YUV2RGBA_IYUV: int -COLOR_YUV2RGBA_NV12: int -COLOR_YUV2RGBA_NV21: int -COLOR_YUV2RGBA_UYNV: int -COLOR_YUV2RGBA_UYVY: int -COLOR_YUV2RGBA_Y422: int -COLOR_YUV2RGBA_YUNV: int -COLOR_YUV2RGBA_YUY2: int -COLOR_YUV2RGBA_YUYV: int 
-COLOR_YUV2RGBA_YV12: int -COLOR_YUV2RGBA_YVYU: int -COLOR_YUV2RGB_I420: int -COLOR_YUV2RGB_IYUV: int -COLOR_YUV2RGB_NV12: int -COLOR_YUV2RGB_NV21: int -COLOR_YUV2RGB_UYNV: int -COLOR_YUV2RGB_UYVY: int -COLOR_YUV2RGB_Y422: int -COLOR_YUV2RGB_YUNV: int -COLOR_YUV2RGB_YUY2: int -COLOR_YUV2RGB_YUYV: int -COLOR_YUV2RGB_YV12: int -COLOR_YUV2RGB_YVYU: int -COLOR_YUV420P2BGR: int -COLOR_YUV420P2BGRA: int -COLOR_YUV420P2GRAY: int -COLOR_YUV420P2RGB: int -COLOR_YUV420P2RGBA: int -COLOR_YUV420SP2BGR: int -COLOR_YUV420SP2BGRA: int -COLOR_YUV420SP2GRAY: int -COLOR_YUV420SP2RGB: int -COLOR_YUV420SP2RGBA: int -COLOR_YUV420p2BGR: int -COLOR_YUV420p2BGRA: int -COLOR_YUV420p2GRAY: int -COLOR_YUV420p2RGB: int -COLOR_YUV420p2RGBA: int -COLOR_YUV420sp2BGR: int -COLOR_YUV420sp2BGRA: int -COLOR_YUV420sp2GRAY: int -COLOR_YUV420sp2RGB: int -COLOR_YUV420sp2RGBA: int -COLOR_mRGBA2RGBA: int -CONTOURS_MATCH_I1: int -CONTOURS_MATCH_I2: int -CONTOURS_MATCH_I3: int -COVAR_COLS: int -COVAR_NORMAL: int -COVAR_ROWS: int -COVAR_SCALE: int -COVAR_SCRAMBLED: int -COVAR_USE_AVG: int -CV_16S: int -CV_16SC1: int -CV_16SC2: int -CV_16SC3: int -CV_16SC4: int -CV_16U: int -CV_16UC1: int -CV_16UC2: int -CV_16UC3: int -CV_16UC4: int -CV_32F: int -CV_32FC1: int -CV_32FC2: int -CV_32FC3: int -CV_32FC4: int -CV_32S: int -CV_32SC1: int -CV_32SC2: int -CV_32SC3: int -CV_32SC4: int -CV_64F: int -CV_64FC1: int -CV_64FC2: int -CV_64FC3: int -CV_64FC4: int -CV_8S: int -CV_8SC1: int -CV_8SC2: int -CV_8SC3: int -CV_8SC4: int -CV_8U: int -CV_8UC1: int -CV_8UC2: int -CV_8UC3: int -CV_8UC4: int -CirclesGridFinderParameters_ASYMMETRIC_GRID: int -CirclesGridFinderParameters_SYMMETRIC_GRID: int -DCT_INVERSE: int -DCT_ROWS: int -DECOMP_CHOLESKY: int -DECOMP_EIG: int -DECOMP_LU: int -DECOMP_NORMAL: int -DECOMP_QR: int -DECOMP_SVD: int -DESCRIPTOR_MATCHER_BRUTEFORCE: int -DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING: int -DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMINGLUT: int -DESCRIPTOR_MATCHER_BRUTEFORCE_L1: int -DESCRIPTOR_MATCHER_BRUTEFORCE_SL2: int -DESCRIPTOR_MATCHER_FLANNBASED: int -DFT_COMPLEX_INPUT: int -DFT_COMPLEX_OUTPUT: int -DFT_INVERSE: int -DFT_REAL_OUTPUT: int -DFT_ROWS: int -DFT_SCALE: int -DISOPTICAL_FLOW_PRESET_FAST: int -DISOPTICAL_FLOW_PRESET_MEDIUM: int -DISOPTICAL_FLOW_PRESET_ULTRAFAST: int -DISOpticalFlow_PRESET_FAST: int -DISOpticalFlow_PRESET_MEDIUM: int -DISOpticalFlow_PRESET_ULTRAFAST: int -DIST_C: int -DIST_FAIR: int -DIST_HUBER: int -DIST_L1: int -DIST_L12: int -DIST_L2: int -DIST_LABEL_CCOMP: int -DIST_LABEL_PIXEL: int -DIST_MASK_3: int -DIST_MASK_5: int -DIST_MASK_PRECISE: int -DIST_USER: int -DIST_WELSCH: int -DRAW_MATCHES_FLAGS_DEFAULT: int -DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG: int -DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS: int -DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS: int -DescriptorMatcher_BRUTEFORCE: int -DescriptorMatcher_BRUTEFORCE_HAMMING: int -DescriptorMatcher_BRUTEFORCE_HAMMINGLUT: int -DescriptorMatcher_BRUTEFORCE_L1: int -DescriptorMatcher_BRUTEFORCE_SL2: int -DescriptorMatcher_FLANNBASED: int -DrawMatchesFlags_DEFAULT: int -DrawMatchesFlags_DRAW_OVER_OUTIMG: int -DrawMatchesFlags_DRAW_RICH_KEYPOINTS: int -DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS: int -EVENT_FLAG_ALTKEY: int -EVENT_FLAG_CTRLKEY: int -EVENT_FLAG_LBUTTON: int -EVENT_FLAG_MBUTTON: int -EVENT_FLAG_RBUTTON: int -EVENT_FLAG_SHIFTKEY: int -EVENT_LBUTTONDBLCLK: int -EVENT_LBUTTONDOWN: int -EVENT_LBUTTONUP: int -EVENT_MBUTTONDBLCLK: int -EVENT_MBUTTONDOWN: int -EVENT_MBUTTONUP: int -EVENT_MOUSEHWHEEL: int -EVENT_MOUSEMOVE: int -EVENT_MOUSEWHEEL: int 
-EVENT_RBUTTONDBLCLK: int -EVENT_RBUTTONDOWN: int -EVENT_RBUTTONUP: int -FACE_RECOGNIZER_SF_FR_COSINE: int -FACE_RECOGNIZER_SF_FR_NORM_L2: int -FAST_FEATURE_DETECTOR_FAST_N: int -FAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION: int -FAST_FEATURE_DETECTOR_THRESHOLD: int -FAST_FEATURE_DETECTOR_TYPE_5_8: int -FAST_FEATURE_DETECTOR_TYPE_7_12: int -FAST_FEATURE_DETECTOR_TYPE_9_16: int -FILE_NODE_EMPTY: int -FILE_NODE_FLOAT: int -FILE_NODE_FLOW: int -FILE_NODE_INT: int -FILE_NODE_MAP: int -FILE_NODE_NAMED: int -FILE_NODE_NONE: int -FILE_NODE_REAL: int -FILE_NODE_SEQ: int -FILE_NODE_STR: int -FILE_NODE_STRING: int -FILE_NODE_TYPE_MASK: int -FILE_NODE_UNIFORM: int -FILE_STORAGE_APPEND: int -FILE_STORAGE_BASE64: int -FILE_STORAGE_FORMAT_AUTO: int -FILE_STORAGE_FORMAT_JSON: int -FILE_STORAGE_FORMAT_MASK: int -FILE_STORAGE_FORMAT_XML: int -FILE_STORAGE_FORMAT_YAML: int -FILE_STORAGE_INSIDE_MAP: int -FILE_STORAGE_MEMORY: int -FILE_STORAGE_NAME_EXPECTED: int -FILE_STORAGE_READ: int -FILE_STORAGE_UNDEFINED: int -FILE_STORAGE_VALUE_EXPECTED: int -FILE_STORAGE_WRITE: int -FILE_STORAGE_WRITE_BASE64: int -FILLED: int -FILTER_SCHARR: int -FLOODFILL_FIXED_RANGE: int -FLOODFILL_MASK_ONLY: int -FM_7POINT: int -FM_8POINT: int -FM_LMEDS: int -FM_RANSAC: int -FONT_HERSHEY_COMPLEX: int -FONT_HERSHEY_COMPLEX_SMALL: int -FONT_HERSHEY_DUPLEX: int -FONT_HERSHEY_PLAIN: int -FONT_HERSHEY_SCRIPT_COMPLEX: int -FONT_HERSHEY_SCRIPT_SIMPLEX: int -FONT_HERSHEY_SIMPLEX: int -FONT_HERSHEY_TRIPLEX: int -FONT_ITALIC: int -FORMATTER_FMT_C: int -FORMATTER_FMT_CSV: int -FORMATTER_FMT_DEFAULT: int -FORMATTER_FMT_MATLAB: int -FORMATTER_FMT_NUMPY: int -FORMATTER_FMT_PYTHON: int -FaceRecognizerSF_FR_COSINE: int -FaceRecognizerSF_FR_NORM_L2: int -FastFeatureDetector_FAST_N: int -FastFeatureDetector_NONMAX_SUPPRESSION: int -FastFeatureDetector_THRESHOLD: int -FastFeatureDetector_TYPE_5_8: int -FastFeatureDetector_TYPE_7_12: int -FastFeatureDetector_TYPE_9_16: int -FileNode_EMPTY: int -FileNode_FLOAT: int -FileNode_FLOW: int -FileNode_INT: int -FileNode_MAP: int -FileNode_NAMED: int -FileNode_NONE: int -FileNode_REAL: int -FileNode_SEQ: int -FileNode_STR: int -FileNode_STRING: int -FileNode_TYPE_MASK: int -FileNode_UNIFORM: int -FileStorage_APPEND: int -FileStorage_BASE64: int -FileStorage_FORMAT_AUTO: int -FileStorage_FORMAT_JSON: int -FileStorage_FORMAT_MASK: int -FileStorage_FORMAT_XML: int -FileStorage_FORMAT_YAML: int -FileStorage_INSIDE_MAP: int -FileStorage_MEMORY: int -FileStorage_NAME_EXPECTED: int -FileStorage_READ: int -FileStorage_UNDEFINED: int -FileStorage_VALUE_EXPECTED: int -FileStorage_WRITE: int -FileStorage_WRITE_BASE64: int -Formatter_FMT_C: int -Formatter_FMT_CSV: int -Formatter_FMT_DEFAULT: int -Formatter_FMT_MATLAB: int -Formatter_FMT_NUMPY: int -Formatter_FMT_PYTHON: int -GC_BGD: int -GC_EVAL: int -GC_EVAL_FREEZE_MODEL: int -GC_FGD: int -GC_INIT_WITH_MASK: int -GC_INIT_WITH_RECT: int -GC_PR_BGD: int -GC_PR_FGD: int -GEMM_1_T: int -GEMM_2_T: int -GEMM_3_T: int -GFLUID_KERNEL_KIND_FILTER: int -GFLUID_KERNEL_KIND_RESIZE: int -GFLUID_KERNEL_KIND_YUV420TO_RGB: int -GFluidKernel_Kind_Filter: int -GFluidKernel_Kind_Resize: int -GFluidKernel_Kind_YUV420toRGB: int -GSHAPE_GARRAY: int -GSHAPE_GFRAME: int -GSHAPE_GMAT: int -GSHAPE_GOPAQUE: int -GSHAPE_GSCALAR: int -GShape_GARRAY: int -GShape_GFRAME: int -GShape_GMAT: int -GShape_GOPAQUE: int -GShape_GSCALAR: int -HISTCMP_BHATTACHARYYA: int -HISTCMP_CHISQR: int -HISTCMP_CHISQR_ALT: int -HISTCMP_CORREL: int -HISTCMP_HELLINGER: int -HISTCMP_INTERSECT: int -HISTCMP_KL_DIV: int 
-HOGDESCRIPTOR_DEFAULT_NLEVELS: int -HOGDESCRIPTOR_DESCR_FORMAT_COL_BY_COL: int -HOGDESCRIPTOR_DESCR_FORMAT_ROW_BY_ROW: int -HOGDESCRIPTOR_L2HYS: int -HOGDescriptor_DEFAULT_NLEVELS: int -HOGDescriptor_DESCR_FORMAT_COL_BY_COL: int -HOGDescriptor_DESCR_FORMAT_ROW_BY_ROW: int -HOGDescriptor_L2Hys: int -HOUGH_GRADIENT: int -HOUGH_GRADIENT_ALT: int -HOUGH_MULTI_SCALE: int -HOUGH_PROBABILISTIC: int -HOUGH_STANDARD: int -IMREAD_ANYCOLOR: int -IMREAD_ANYDEPTH: int -IMREAD_COLOR: int -IMREAD_GRAYSCALE: int -IMREAD_IGNORE_ORIENTATION: int -IMREAD_LOAD_GDAL: int -IMREAD_REDUCED_COLOR_2: int -IMREAD_REDUCED_COLOR_4: int -IMREAD_REDUCED_COLOR_8: int -IMREAD_REDUCED_GRAYSCALE_2: int -IMREAD_REDUCED_GRAYSCALE_4: int -IMREAD_REDUCED_GRAYSCALE_8: int -IMREAD_UNCHANGED: int -IMWRITE_EXR_COMPRESSION: int -IMWRITE_EXR_COMPRESSION_B44: int -IMWRITE_EXR_COMPRESSION_B44A: int -IMWRITE_EXR_COMPRESSION_DWAA: int -IMWRITE_EXR_COMPRESSION_DWAB: int -IMWRITE_EXR_COMPRESSION_NO: int -IMWRITE_EXR_COMPRESSION_PIZ: int -IMWRITE_EXR_COMPRESSION_PXR24: int -IMWRITE_EXR_COMPRESSION_RLE: int -IMWRITE_EXR_COMPRESSION_ZIP: int -IMWRITE_EXR_COMPRESSION_ZIPS: int -IMWRITE_EXR_TYPE: int -IMWRITE_EXR_TYPE_FLOAT: int -IMWRITE_EXR_TYPE_HALF: int -IMWRITE_JPEG2000_COMPRESSION_X1000: int -IMWRITE_JPEG_CHROMA_QUALITY: int -IMWRITE_JPEG_LUMA_QUALITY: int -IMWRITE_JPEG_OPTIMIZE: int -IMWRITE_JPEG_PROGRESSIVE: int -IMWRITE_JPEG_QUALITY: int -IMWRITE_JPEG_RST_INTERVAL: int -IMWRITE_PAM_FORMAT_BLACKANDWHITE: int -IMWRITE_PAM_FORMAT_GRAYSCALE: int -IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA: int -IMWRITE_PAM_FORMAT_NULL: int -IMWRITE_PAM_FORMAT_RGB: int -IMWRITE_PAM_FORMAT_RGB_ALPHA: int -IMWRITE_PAM_TUPLETYPE: int -IMWRITE_PNG_BILEVEL: int -IMWRITE_PNG_COMPRESSION: int -IMWRITE_PNG_STRATEGY: int -IMWRITE_PNG_STRATEGY_DEFAULT: int -IMWRITE_PNG_STRATEGY_FILTERED: int -IMWRITE_PNG_STRATEGY_FIXED: int -IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY: int -IMWRITE_PNG_STRATEGY_RLE: int -IMWRITE_PXM_BINARY: int -IMWRITE_TIFF_COMPRESSION: int -IMWRITE_TIFF_RESUNIT: int -IMWRITE_TIFF_XDPI: int -IMWRITE_TIFF_YDPI: int -IMWRITE_WEBP_QUALITY: int -INPAINT_NS: int -INPAINT_TELEA: int -INTERSECT_FULL: int -INTERSECT_NONE: int -INTERSECT_PARTIAL: int -INTER_AREA: int -INTER_BITS: int -INTER_BITS2: int -INTER_CUBIC: int -INTER_LANCZOS4: int -INTER_LINEAR: int -INTER_LINEAR_EXACT: int -INTER_MAX: int -INTER_NEAREST: int -INTER_NEAREST_EXACT: int -INTER_TAB_SIZE: int -INTER_TAB_SIZE2: int -KAZE_DIFF_CHARBONNIER: int -KAZE_DIFF_PM_G1: int -KAZE_DIFF_PM_G2: int -KAZE_DIFF_WEICKERT: int -KMEANS_PP_CENTERS: int -KMEANS_RANDOM_CENTERS: int -KMEANS_USE_INITIAL_LABELS: int -LDR_SIZE: int -LINE_4: int -LINE_8: int -LINE_AA: int -LMEDS: int -LOCAL_OPTIM_GC: int -LOCAL_OPTIM_INNER_AND_ITER_LO: int -LOCAL_OPTIM_INNER_LO: int -LOCAL_OPTIM_NULL: int -LOCAL_OPTIM_SIGMA: int -LSD_REFINE_ADV: int -LSD_REFINE_NONE: int -LSD_REFINE_STD: int -MARKER_CROSS: int -MARKER_DIAMOND: int -MARKER_SQUARE: int -MARKER_STAR: int -MARKER_TILTED_CROSS: int -MARKER_TRIANGLE_DOWN: int -MARKER_TRIANGLE_UP: int -MAT_AUTO_STEP: int -MAT_CONTINUOUS_FLAG: int -MAT_DEPTH_MASK: int -MAT_MAGIC_MASK: int -MAT_MAGIC_VAL: int -MAT_SUBMATRIX_FLAG: int -MAT_TYPE_MASK: int -MEDIA_FORMAT_BGR: int -MEDIA_FORMAT_NV12: int -MEDIA_FRAME_ACCESS_R: int -MEDIA_FRAME_ACCESS_W: int -MIXED_CLONE: int -MONOCHROME_TRANSFER: int -MORPH_BLACKHAT: int -MORPH_CLOSE: int -MORPH_CROSS: int -MORPH_DILATE: int -MORPH_ELLIPSE: int -MORPH_ERODE: int -MORPH_GRADIENT: int -MORPH_HITMISS: int -MORPH_OPEN: int -MORPH_RECT: int -MORPH_TOPHAT: int 
-MOTION_AFFINE: int -MOTION_EUCLIDEAN: int -MOTION_HOMOGRAPHY: int -MOTION_TRANSLATION: int -Mat_AUTO_STEP: int -Mat_CONTINUOUS_FLAG: int -Mat_DEPTH_MASK: int -Mat_MAGIC_MASK: int -Mat_MAGIC_VAL: int -Mat_SUBMATRIX_FLAG: int -Mat_TYPE_MASK: int -MediaFormat_BGR: int -MediaFormat_NV12: int -MediaFrame_Access_R: int -MediaFrame_Access_W: int -NEIGH_FLANN_KNN: int -NEIGH_FLANN_RADIUS: int -NEIGH_GRID: int -NORMAL_CLONE: int -NORMCONV_FILTER: int -NORM_HAMMING: int -NORM_HAMMING2: int -NORM_INF: int -NORM_L1: int -NORM_L2: int -NORM_L2SQR: int -NORM_MINMAX: int -NORM_RELATIVE: int -NORM_TYPE_MASK: int -OPTFLOW_FARNEBACK_GAUSSIAN: int -OPTFLOW_LK_GET_MIN_EIGENVALS: int -OPTFLOW_USE_INITIAL_FLOW: int -ORB_FAST_SCORE: int -ORB_HARRIS_SCORE: int -PARAM_ALGORITHM: int -PARAM_BOOLEAN: int -PARAM_FLOAT: int -PARAM_INT: int -PARAM_MAT: int -PARAM_MAT_VECTOR: int -PARAM_REAL: int -PARAM_SCALAR: int -PARAM_STRING: int -PARAM_UCHAR: int -PARAM_UINT64: int -PARAM_UNSIGNED_INT: int -PCA_DATA_AS_COL: int -PCA_DATA_AS_ROW: int -PCA_USE_AVG: int -PROJ_SPHERICAL_EQRECT: int -PROJ_SPHERICAL_ORTHO: int -Param_ALGORITHM: int -Param_BOOLEAN: int -Param_FLOAT: int -Param_INT: int -Param_MAT: int -Param_MAT_VECTOR: int -Param_REAL: int -Param_SCALAR: int -Param_STRING: int -Param_UCHAR: int -Param_UINT64: int -Param_UNSIGNED_INT: int -QRCODE_ENCODER_CORRECT_LEVEL_H: int -QRCODE_ENCODER_CORRECT_LEVEL_L: int -QRCODE_ENCODER_CORRECT_LEVEL_M: int -QRCODE_ENCODER_CORRECT_LEVEL_Q: int -QRCODE_ENCODER_ECI_UTF8: int -QRCODE_ENCODER_MODE_ALPHANUMERIC: int -QRCODE_ENCODER_MODE_AUTO: int -QRCODE_ENCODER_MODE_BYTE: int -QRCODE_ENCODER_MODE_ECI: int -QRCODE_ENCODER_MODE_KANJI: int -QRCODE_ENCODER_MODE_NUMERIC: int -QRCODE_ENCODER_MODE_STRUCTURED_APPEND: int -QRCodeEncoder_CORRECT_LEVEL_H: int -QRCodeEncoder_CORRECT_LEVEL_L: int -QRCodeEncoder_CORRECT_LEVEL_M: int -QRCodeEncoder_CORRECT_LEVEL_Q: int -QRCodeEncoder_ECI_UTF8: int -QRCodeEncoder_MODE_ALPHANUMERIC: int -QRCodeEncoder_MODE_AUTO: int -QRCodeEncoder_MODE_BYTE: int -QRCodeEncoder_MODE_ECI: int -QRCodeEncoder_MODE_KANJI: int -QRCodeEncoder_MODE_NUMERIC: int -QRCodeEncoder_MODE_STRUCTURED_APPEND: int -QT_CHECKBOX: int -QT_FONT_BLACK: int -QT_FONT_BOLD: int -QT_FONT_DEMIBOLD: int -QT_FONT_LIGHT: int -QT_FONT_NORMAL: int -QT_NEW_BUTTONBAR: int -QT_PUSH_BUTTON: int -QT_RADIOBOX: int -QT_STYLE_ITALIC: int -QT_STYLE_NORMAL: int -QT_STYLE_OBLIQUE: int -QUAT_ASSUME_NOT_UNIT: int -QUAT_ASSUME_UNIT: int -QUAT_ENUM_EULER_ANGLES_MAX_VALUE: int -QUAT_ENUM_EXT_XYX: int -QUAT_ENUM_EXT_XYZ: int -QUAT_ENUM_EXT_XZX: int -QUAT_ENUM_EXT_XZY: int -QUAT_ENUM_EXT_YXY: int -QUAT_ENUM_EXT_YXZ: int -QUAT_ENUM_EXT_YZX: int -QUAT_ENUM_EXT_YZY: int -QUAT_ENUM_EXT_ZXY: int -QUAT_ENUM_EXT_ZXZ: int -QUAT_ENUM_EXT_ZYX: int -QUAT_ENUM_EXT_ZYZ: int -QUAT_ENUM_INT_XYX: int -QUAT_ENUM_INT_XYZ: int -QUAT_ENUM_INT_XZX: int -QUAT_ENUM_INT_XZY: int -QUAT_ENUM_INT_YXY: int -QUAT_ENUM_INT_YXZ: int -QUAT_ENUM_INT_YZX: int -QUAT_ENUM_INT_YZY: int -QUAT_ENUM_INT_ZXY: int -QUAT_ENUM_INT_ZXZ: int -QUAT_ENUM_INT_ZYX: int -QUAT_ENUM_INT_ZYZ: int -QuatEnum_EULER_ANGLES_MAX_VALUE: int -QuatEnum_EXT_XYX: int -QuatEnum_EXT_XYZ: int -QuatEnum_EXT_XZX: int -QuatEnum_EXT_XZY: int -QuatEnum_EXT_YXY: int -QuatEnum_EXT_YXZ: int -QuatEnum_EXT_YZX: int -QuatEnum_EXT_YZY: int -QuatEnum_EXT_ZXY: int -QuatEnum_EXT_ZXZ: int -QuatEnum_EXT_ZYX: int -QuatEnum_EXT_ZYZ: int -QuatEnum_INT_XYX: int -QuatEnum_INT_XYZ: int -QuatEnum_INT_XZX: int -QuatEnum_INT_XZY: int -QuatEnum_INT_YXY: int -QuatEnum_INT_YXZ: int -QuatEnum_INT_YZX: int 
-QuatEnum_INT_YZY: int -QuatEnum_INT_ZXY: int -QuatEnum_INT_ZXZ: int -QuatEnum_INT_ZYX: int -QuatEnum_INT_ZYZ: int -RANSAC: int -RECURS_FILTER: int -REDUCE_AVG: int -REDUCE_MAX: int -REDUCE_MIN: int -REDUCE_SUM: int -RETR_CCOMP: int -RETR_EXTERNAL: int -RETR_FLOODFILL: int -RETR_LIST: int -RETR_TREE: int -RHO: int -RMAT_ACCESS_R: int -RMAT_ACCESS_W: int -RMat_Access_R: int -RMat_Access_W: int -RNG_NORMAL: int -RNG_UNIFORM: int -ROTATE_180: int -ROTATE_90_CLOCKWISE: int -ROTATE_90_COUNTERCLOCKWISE: int -SAMPLING_NAPSAC: int -SAMPLING_PROGRESSIVE_NAPSAC: int -SAMPLING_PROSAC: int -SAMPLING_UNIFORM: int -SCORE_METHOD_LMEDS: int -SCORE_METHOD_MAGSAC: int -SCORE_METHOD_MSAC: int -SCORE_METHOD_RANSAC: int -SOLVELP_MULTI: int -SOLVELP_SINGLE: int -SOLVELP_UNBOUNDED: int -SOLVELP_UNFEASIBLE: int -SOLVEPNP_AP3P: int -SOLVEPNP_DLS: int -SOLVEPNP_EPNP: int -SOLVEPNP_IPPE: int -SOLVEPNP_IPPE_SQUARE: int -SOLVEPNP_ITERATIVE: int -SOLVEPNP_MAX_COUNT: int -SOLVEPNP_P3P: int -SOLVEPNP_SQPNP: int -SOLVEPNP_UPNP: int -SORT_ASCENDING: int -SORT_DESCENDING: int -SORT_EVERY_COLUMN: int -SORT_EVERY_ROW: int -SPARSE_MAT_HASH_BIT: int -SPARSE_MAT_HASH_SCALE: int -SPARSE_MAT_MAGIC_VAL: int -SPARSE_MAT_MAX_DIM: int -STEREO_BM_PREFILTER_NORMALIZED_RESPONSE: int -STEREO_BM_PREFILTER_XSOBEL: int -STEREO_MATCHER_DISP_SCALE: int -STEREO_MATCHER_DISP_SHIFT: int -STEREO_SGBM_MODE_HH: int -STEREO_SGBM_MODE_HH4: int -STEREO_SGBM_MODE_SGBM: int -STEREO_SGBM_MODE_SGBM_3WAY: int -STITCHER_ERR_CAMERA_PARAMS_ADJUST_FAIL: int -STITCHER_ERR_HOMOGRAPHY_EST_FAIL: int -STITCHER_ERR_NEED_MORE_IMGS: int -STITCHER_OK: int -STITCHER_PANORAMA: int -STITCHER_SCANS: int -SUBDIV2D_NEXT_AROUND_DST: int -SUBDIV2D_NEXT_AROUND_LEFT: int -SUBDIV2D_NEXT_AROUND_ORG: int -SUBDIV2D_NEXT_AROUND_RIGHT: int -SUBDIV2D_PREV_AROUND_DST: int -SUBDIV2D_PREV_AROUND_LEFT: int -SUBDIV2D_PREV_AROUND_ORG: int -SUBDIV2D_PREV_AROUND_RIGHT: int -SUBDIV2D_PTLOC_ERROR: int -SUBDIV2D_PTLOC_INSIDE: int -SUBDIV2D_PTLOC_ON_EDGE: int -SUBDIV2D_PTLOC_OUTSIDE_RECT: int -SUBDIV2D_PTLOC_VERTEX: int -SVD_FULL_UV: int -SVD_MODIFY_A: int -SVD_NO_UV: int -SparseMat_HASH_BIT: int -SparseMat_HASH_SCALE: int -SparseMat_MAGIC_VAL: int -SparseMat_MAX_DIM: int -StereoBM_PREFILTER_NORMALIZED_RESPONSE: int -StereoBM_PREFILTER_XSOBEL: int -StereoMatcher_DISP_SCALE: int -StereoMatcher_DISP_SHIFT: int -StereoSGBM_MODE_HH: int -StereoSGBM_MODE_HH4: int -StereoSGBM_MODE_SGBM: int -StereoSGBM_MODE_SGBM_3WAY: int -Stitcher_ERR_CAMERA_PARAMS_ADJUST_FAIL: int -Stitcher_ERR_HOMOGRAPHY_EST_FAIL: int -Stitcher_ERR_NEED_MORE_IMGS: int -Stitcher_OK: int -Stitcher_PANORAMA: int -Stitcher_SCANS: int -Subdiv2D_NEXT_AROUND_DST: int -Subdiv2D_NEXT_AROUND_LEFT: int -Subdiv2D_NEXT_AROUND_ORG: int -Subdiv2D_NEXT_AROUND_RIGHT: int -Subdiv2D_PREV_AROUND_DST: int -Subdiv2D_PREV_AROUND_LEFT: int -Subdiv2D_PREV_AROUND_ORG: int -Subdiv2D_PREV_AROUND_RIGHT: int -Subdiv2D_PTLOC_ERROR: int -Subdiv2D_PTLOC_INSIDE: int -Subdiv2D_PTLOC_ON_EDGE: int -Subdiv2D_PTLOC_OUTSIDE_RECT: int -Subdiv2D_PTLOC_VERTEX: int -TERM_CRITERIA_COUNT: int -TERM_CRITERIA_EPS: int -TERM_CRITERIA_MAX_ITER: int -THRESH_BINARY: int -THRESH_BINARY_INV: int -THRESH_MASK: int -THRESH_OTSU: int -THRESH_TOZERO: int -THRESH_TOZERO_INV: int -THRESH_TRIANGLE: int -THRESH_TRUNC: int -TM_CCOEFF: int -TM_CCOEFF_NORMED: int -TM_CCORR: int -TM_CCORR_NORMED: int -TM_SQDIFF: int -TM_SQDIFF_NORMED: int -TermCriteria_COUNT: int -TermCriteria_EPS: int -TermCriteria_MAX_ITER: int -UMAT_AUTO_STEP: int -UMAT_CONTINUOUS_FLAG: int -UMAT_DATA_ASYNC_CLEANUP: int 
-UMAT_DATA_COPY_ON_MAP: int -UMAT_DATA_DEVICE_COPY_OBSOLETE: int -UMAT_DATA_DEVICE_MEM_MAPPED: int -UMAT_DATA_HOST_COPY_OBSOLETE: int -UMAT_DATA_TEMP_COPIED_UMAT: int -UMAT_DATA_TEMP_UMAT: int -UMAT_DATA_USER_ALLOCATED: int -UMAT_DEPTH_MASK: int -UMAT_MAGIC_MASK: int -UMAT_MAGIC_VAL: int -UMAT_SUBMATRIX_FLAG: int -UMAT_TYPE_MASK: int -UMatData_ASYNC_CLEANUP: int -UMatData_COPY_ON_MAP: int -UMatData_DEVICE_COPY_OBSOLETE: int -UMatData_DEVICE_MEM_MAPPED: int -UMatData_HOST_COPY_OBSOLETE: int -UMatData_TEMP_COPIED_UMAT: int -UMatData_TEMP_UMAT: int -UMatData_USER_ALLOCATED: int -UMat_AUTO_STEP: int -UMat_CONTINUOUS_FLAG: int -UMat_DEPTH_MASK: int -UMat_MAGIC_MASK: int -UMat_MAGIC_VAL: int -UMat_SUBMATRIX_FLAG: int -UMat_TYPE_MASK: int -USAC_ACCURATE: int -USAC_DEFAULT: int -USAC_FAST: int -USAC_FM_8PTS: int -USAC_MAGSAC: int -USAC_PARALLEL: int -USAC_PROSAC: int -USAGE_ALLOCATE_DEVICE_MEMORY: int -USAGE_ALLOCATE_HOST_MEMORY: int -USAGE_ALLOCATE_SHARED_MEMORY: int -USAGE_DEFAULT: int -VIDEOWRITER_PROP_DEPTH: int -VIDEOWRITER_PROP_FRAMEBYTES: int -VIDEOWRITER_PROP_HW_ACCELERATION: int -VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL: int -VIDEOWRITER_PROP_HW_DEVICE: int -VIDEOWRITER_PROP_IS_COLOR: int -VIDEOWRITER_PROP_NSTRIPES: int -VIDEOWRITER_PROP_QUALITY: int -VIDEO_ACCELERATION_ANY: int -VIDEO_ACCELERATION_D3D11: int -VIDEO_ACCELERATION_MFX: int -VIDEO_ACCELERATION_NONE: int -VIDEO_ACCELERATION_VAAPI: int -WARP_FILL_OUTLIERS: int -WARP_INVERSE_MAP: int -WARP_POLAR_LINEAR: int -WARP_POLAR_LOG: int -WINDOW_AUTOSIZE: int -WINDOW_FREERATIO: int -WINDOW_FULLSCREEN: int -WINDOW_GUI_EXPANDED: int -WINDOW_GUI_NORMAL: int -WINDOW_KEEPRATIO: int -WINDOW_NORMAL: int -WINDOW_OPENGL: int -WND_PROP_ASPECT_RATIO: int -WND_PROP_AUTOSIZE: int -WND_PROP_FULLSCREEN: int -WND_PROP_OPENGL: int -WND_PROP_TOPMOST: int -WND_PROP_VISIBLE: int -WND_PROP_VSYNC: int - - -class AKAZE(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getDescriptorChannels(self, *args, **kwargs): ... # incomplete - def getDescriptorSize(self, *args, **kwargs): ... # incomplete - def getDescriptorType(self, *args, **kwargs): ... # incomplete - def getDiffusivity(self, *args, **kwargs): ... # incomplete - def getNOctaveLayers(self, *args, **kwargs): ... # incomplete - def getNOctaves(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... # incomplete - def setDescriptorChannels(self, dch) -> None: ... - def setDescriptorSize(self, dsize) -> None: ... - def setDescriptorType(self, dtype) -> None: ... - def setDiffusivity(self, diff) -> None: ... - def setNOctaveLayers(self, octaveLayers) -> None: ... - def setNOctaves(self, octaves) -> None: ... - def setThreshold(self, threshold) -> None: ... - - -class AffineFeature(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getViewParams(self, tilts, rolls) -> None: ... - def setViewParams(self, tilts, rolls) -> None: ... - - -class AgastFeatureDetector(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getNonmaxSuppression(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... 
# incomplete - def getType(self, *args, **kwargs): ... # incomplete - def setNonmaxSuppression(self, f) -> None: ... - def setThreshold(self, threshold) -> None: ... - def setType(self, type) -> None: ... - - -class Algorithm: - def __init__(self, *args, **kwargs) -> None: ... - def clear(self) -> None: ... - def empty(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def read(self, fn) -> None: ... - def save(self, filename) -> None: ... - def write(self, *args, **kwargs): ... # incomplete - - -class AlignExposures(Algorithm): - def process(self, src, dst, times, response) -> None: ... - - -class AlignMTB(AlignExposures): - def calculateShift(self, *args, **kwargs): ... # incomplete - def computeBitmaps(self, *args, **kwargs): ... # incomplete - def getCut(self, *args, **kwargs): ... # incomplete - def getExcludeRange(self, *args, **kwargs): ... # incomplete - def getMaxBits(self, *args, **kwargs): ... # incomplete - @overload - def process(self, src, dst, times, response) -> None: ... - @overload - def process(self, src, dst) -> None: ... - def setCut(self, value) -> None: ... - def setExcludeRange(self, exclude_range) -> None: ... - def setMaxBits(self, max_bits) -> None: ... - def shiftMat(self, *args, **kwargs): ... # incomplete - - -class AsyncArray: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def get(self, *args, **kwargs): ... # incomplete - def release(self) -> None: ... - def valid(self, *args, **kwargs): ... # incomplete - def wait_for(self, *args, **kwargs): ... # incomplete - - -class BFMatcher(DescriptorMatcher): - def __init__(self, normType: int | None = ..., crossCheck: _Boolean = ...) -> None: ... - def create(self, *args, **kwargs): ... # incomplete - - -class BOWImgDescriptorExtractor: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def compute(self, *args, **kwargs): ... # incomplete - def descriptorSize(self, *args, **kwargs): ... # incomplete - def descriptorType(self, *args, **kwargs): ... # incomplete - def getVocabulary(self, *args, **kwargs): ... # incomplete - def setVocabulary(self, vocabulary) -> None: ... - - -class BOWKMeansTrainer(BOWTrainer): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def cluster(self, *args, **kwargs): ... # incomplete - - -class BOWTrainer: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def add(self, descriptors) -> None: ... - def clear(self) -> None: ... - def cluster(self, *args, **kwargs): ... # incomplete - def descriptorsCount(self, *args, **kwargs): ... # incomplete - def getDescriptors(self, *args, **kwargs): ... # incomplete - - -class BRISK(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getOctaves(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... # incomplete - def setOctaves(self, octaves) -> None: ... - def setThreshold(self, threshold) -> None: ... - - -class BackgroundSubtractor(Algorithm): - def apply(self, *args, **kwargs): ... # incomplete - def getBackgroundImage(self, *args, **kwargs): ... # incomplete - - -class BackgroundSubtractorKNN(BackgroundSubtractor): - def getDetectShadows(self, *args, **kwargs): ... # incomplete - def getDist2Threshold(self, *args, **kwargs): ... # incomplete - def getHistory(self, *args, **kwargs): ... # incomplete - def getNSamples(self, *args, **kwargs): ... 
# incomplete - def getShadowThreshold(self, *args, **kwargs): ... # incomplete - def getShadowValue(self, *args, **kwargs): ... # incomplete - def getkNNSamples(self, *args, **kwargs): ... # incomplete - def setDetectShadows(self, detectShadows) -> None: ... - def setDist2Threshold(self, _dist2Threshold) -> None: ... - def setHistory(self, history) -> None: ... - def setNSamples(self, _nN) -> None: ... - def setShadowThreshold(self, threshold) -> None: ... - def setShadowValue(self, value) -> None: ... - def setkNNSamples(self, _nkNN) -> None: ... - - -class BackgroundSubtractorMOG2(BackgroundSubtractor): - def apply(self, *args, **kwargs): ... # incomplete - def getBackgroundRatio(self, *args, **kwargs): ... # incomplete - def getComplexityReductionThreshold(self, *args, **kwargs): ... # incomplete - def getDetectShadows(self, *args, **kwargs): ... # incomplete - def getHistory(self, *args, **kwargs): ... # incomplete - def getNMixtures(self, *args, **kwargs): ... # incomplete - def getShadowThreshold(self, *args, **kwargs): ... # incomplete - def getShadowValue(self, *args, **kwargs): ... # incomplete - def getVarInit(self, *args, **kwargs): ... # incomplete - def getVarMax(self, *args, **kwargs): ... # incomplete - def getVarMin(self, *args, **kwargs): ... # incomplete - def getVarThreshold(self, *args, **kwargs): ... # incomplete - def getVarThresholdGen(self, *args, **kwargs): ... # incomplete - def setBackgroundRatio(self, ratio) -> None: ... - def setComplexityReductionThreshold(self, ct) -> None: ... - def setDetectShadows(self, detectShadows) -> None: ... - def setHistory(self, history) -> None: ... - def setNMixtures(self, nmixtures) -> None: ... - def setShadowThreshold(self, threshold) -> None: ... - def setShadowValue(self, value) -> None: ... - def setVarInit(self, varInit) -> None: ... - def setVarMax(self, varMax) -> None: ... - def setVarMin(self, varMin) -> None: ... - def setVarThreshold(self, varThreshold) -> None: ... - def setVarThresholdGen(self, varThresholdGen) -> None: ... - - -class BaseCascadeClassifier(Algorithm): ... - - -class CLAHE(Algorithm): - def apply(self, *args, **kwargs): ... # incomplete - def collectGarbage(self) -> None: ... - def getClipLimit(self, *args, **kwargs): ... # incomplete - def getTilesGridSize(self, *args, **kwargs): ... # incomplete - def setClipLimit(self, clipLimit) -> None: ... - def setTilesGridSize(self, tileGridSize) -> None: ... - - -class CalibrateCRF(Algorithm): - def process(self, *args, **kwargs): ... # incomplete - - -class CalibrateDebevec(CalibrateCRF): - def getLambda(self, *args, **kwargs): ... # incomplete - def getRandom(self, *args, **kwargs): ... # incomplete - def getSamples(self, *args, **kwargs): ... # incomplete - def setLambda(self, lambda_) -> None: ... - def setRandom(self, random) -> None: ... - def setSamples(self, samples) -> None: ... - - -class CalibrateRobertson(CalibrateCRF): - def getMaxIter(self, *args, **kwargs): ... # incomplete - def getRadiance(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... # incomplete - def setMaxIter(self, max_iter) -> None: ... - def setThreshold(self, threshold) -> None: ... - - -class CascadeClassifier: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def convert(self, *args, **kwargs): ... # incomplete - def detectMultiScale(self, *args, **kwargs): ... # incomplete - def detectMultiScale2(self, *args, **kwargs): ... # incomplete - def detectMultiScale3(self, *args, **kwargs): ... 
# incomplete - def empty(self, *args, **kwargs): ... # incomplete - def getFeatureType(self, *args, **kwargs): ... # incomplete - def getOriginalWindowSize(self, *args, **kwargs): ... # incomplete - def isOldFormatCascade(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def read(self, *args, **kwargs): ... # incomplete - - -class CirclesGridFinderParameters: - convexHullFactor: Incomplete - densityNeighborhoodSize: Incomplete - edgeGain: Incomplete - edgePenalty: Incomplete - existingVertexGain: Incomplete - keypointScale: Incomplete - kmeansAttempts: Incomplete - maxRectifiedDistance: Incomplete - minDensity: Incomplete - minDistanceToAddKeypoint: Incomplete - minGraphConfidence: Incomplete - minRNGEdgeSwitchDist: Incomplete - squareSize: Incomplete - vertexGain: Incomplete - vertexPenalty: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class DISOpticalFlow(DenseOpticalFlow): - def create(self, *args, **kwargs): ... # incomplete - def getFinestScale(self, *args, **kwargs): ... # incomplete - def getGradientDescentIterations(self, *args, **kwargs): ... # incomplete - def getPatchSize(self, *args, **kwargs): ... # incomplete - def getPatchStride(self, *args, **kwargs): ... # incomplete - def getUseMeanNormalization(self, *args, **kwargs): ... # incomplete - def getUseSpatialPropagation(self, *args, **kwargs): ... # incomplete - def getVariationalRefinementAlpha(self, *args, **kwargs): ... # incomplete - def getVariationalRefinementDelta(self, *args, **kwargs): ... # incomplete - def getVariationalRefinementGamma(self, *args, **kwargs): ... # incomplete - def getVariationalRefinementIterations(self, *args, **kwargs): ... # incomplete - def setFinestScale(self, val) -> None: ... - def setGradientDescentIterations(self, val) -> None: ... - def setPatchSize(self, val) -> None: ... - def setPatchStride(self, val) -> None: ... - def setUseMeanNormalization(self, val) -> None: ... - def setUseSpatialPropagation(self, val) -> None: ... - def setVariationalRefinementAlpha(self, val) -> None: ... - def setVariationalRefinementDelta(self, val) -> None: ... - def setVariationalRefinementGamma(self, val) -> None: ... - def setVariationalRefinementIterations(self, val) -> None: ... - - -class DMatch: - distance: Incomplete - imgIdx: Incomplete - queryIdx: Incomplete - trainIdx: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class DenseOpticalFlow(Algorithm): - def calc(self, I0, I1, flow) -> _flow: ... - def collectGarbage(self) -> None: ... - - -class DescriptorMatcher(Algorithm): - def add(self, descriptors) -> None: ... - def clear(self) -> None: ... - def clone(self, *args, **kwargs): ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def getTrainDescriptors(self, *args, **kwargs): ... # incomplete - def isMaskSupported(self, *args, **kwargs): ... # incomplete - def knnMatch(self, *args, **kwargs): ... # incomplete - def match(self, *args, **kwargs): ... # incomplete - def radiusMatch(self, *args, **kwargs): ... # incomplete - def read(self, fileName) -> None: ... - def train(self) -> None: ... - def write(self, fileName) -> None: ... - - -class FaceDetectorYN: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def getInputSize(self, *args, **kwargs): ... 
# incomplete - def getNMSThreshold(self, *args, **kwargs): ... # incomplete - def getScoreThreshold(self, *args, **kwargs): ... # incomplete - def getTopK(self, *args, **kwargs): ... # incomplete - def setInputSize(self, input_size) -> None: ... - def setNMSThreshold(self, nms_threshold) -> None: ... - def setScoreThreshold(self, score_threshold) -> None: ... - def setTopK(self, top_k) -> None: ... - - -class FaceRecognizerSF: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def alignCrop(self, *args, **kwargs): ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def feature(self, *args, **kwargs): ... # incomplete - def match(self, *args, **kwargs): ... # incomplete - - -class FarnebackOpticalFlow(DenseOpticalFlow): - def create(self, *args, **kwargs): ... # incomplete - def getFastPyramids(self, *args, **kwargs): ... # incomplete - def getFlags(self, *args, **kwargs): ... # incomplete - def getNumIters(self, *args, **kwargs): ... # incomplete - def getNumLevels(self, *args, **kwargs): ... # incomplete - def getPolyN(self, *args, **kwargs): ... # incomplete - def getPolySigma(self, *args, **kwargs): ... # incomplete - def getPyrScale(self, *args, **kwargs): ... # incomplete - def getWinSize(self, *args, **kwargs): ... # incomplete - def setFastPyramids(self, fastPyramids) -> None: ... - def setFlags(self, flags: int | None) -> None: ... - def setNumIters(self, numIters) -> None: ... - def setNumLevels(self, numLevels) -> None: ... - def setPolyN(self, polyN) -> None: ... - def setPolySigma(self, polySigma) -> None: ... - def setPyrScale(self, pyrScale) -> None: ... - def setWinSize(self, winSize) -> None: ... - - -class FastFeatureDetector(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getNonmaxSuppression(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... # incomplete - def getType(self, *args, **kwargs): ... # incomplete - def setNonmaxSuppression(self, f) -> None: ... - def setThreshold(self, threshold) -> None: ... - def setType(self, type) -> None: ... - - -class Feature2D: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def compute(self, *args, **kwargs): ... # incomplete - def defaultNorm(self, *args, **kwargs): ... # incomplete - def descriptorSize(self, *args, **kwargs): ... # incomplete - def descriptorType(self, *args, **kwargs): ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def detectAndCompute(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - @overload - def read(self, fileName) -> None: ... - @overload - def read(self, arg1) -> None: ... - def write(self, fileName) -> None: ... - - -class FileNode: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def at(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def getNode(self, *args, **kwargs): ... # incomplete - def isInt(self, *args, **kwargs): ... # incomplete - def isMap(self, *args, **kwargs): ... # incomplete - def isNamed(self, *args, **kwargs): ... # incomplete - def isNone(self, *args, **kwargs): ... # incomplete - def isReal(self, *args, **kwargs): ... # incomplete - def isSeq(self, *args, **kwargs): ... # incomplete - def isString(self, *args, **kwargs): ... 
# incomplete - def keys(self, *args, **kwargs): ... # incomplete - def mat(self, *args, **kwargs): ... # incomplete - def name(self, *args, **kwargs): ... # incomplete - def rawSize(self, *args, **kwargs): ... # incomplete - def real(self, *args, **kwargs): ... # incomplete - def size(self, *args, **kwargs): ... # incomplete - def string(self, *args, **kwargs): ... # incomplete - def type(self, *args, **kwargs): ... # incomplete - - -class FileStorage: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def endWriteStruct(self) -> None: ... - def getFirstTopLevelNode(self, *args, **kwargs): ... # incomplete - def getFormat(self, *args, **kwargs): ... # incomplete - def getNode(self, *args, **kwargs): ... # incomplete - def isOpened(self, *args, **kwargs): ... # incomplete - def open(self, *args, **kwargs): ... # incomplete - def release(self) -> None: ... - def releaseAndGetString(self, *args, **kwargs): ... # incomplete - def root(self, *args, **kwargs): ... # incomplete - def startWriteStruct(self, *args, **kwargs): ... # incomplete - def write(self, name, val) -> None: ... - def writeComment(self, *args, **kwargs): ... # incomplete - - -class FlannBasedMatcher(DescriptorMatcher): - def __init__(self, indexParams=..., searchParams=...) -> None: ... - def create(self, *args, **kwargs): ... # incomplete - - -class GArrayDesc: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GArrayT: - def __init__(self, type: int) -> None: ... - def type(self) -> int: ... - - -class GCompileArg: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GComputation: - def __init__(self, arg: gapi_GKernelPackage | gapi_GNetPackage | queue_capacity) -> None: ... - def apply(self): ... - def compileStreaming(self, *args, **kwargs): ... # incomplete - - -class GFTTDetector(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getBlockSize(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getHarrisDetector(self, *args, **kwargs): ... # incomplete - def getK(self, *args, **kwargs): ... # incomplete - def getMaxFeatures(self, *args, **kwargs): ... # incomplete - def getMinDistance(self, *args, **kwargs): ... # incomplete - def getQualityLevel(self, *args, **kwargs): ... # incomplete - def setBlockSize(self, blockSize) -> None: ... - def setHarrisDetector(self, val) -> None: ... - def setK(self, k) -> None: ... - def setMaxFeatures(self, maxFeatures) -> None: ... - def setMinDistance(self, minDistance) -> None: ... - def setQualityLevel(self, qlevel) -> None: ... - - -class GFrame: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GInferInputs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def setInput(self, *args, **kwargs): ... # incomplete - - -class GInferListInputs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def setInput(self, *args, **kwargs): ... # incomplete - - -class GInferListOutputs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def at(self, *args, **kwargs): ... # incomplete - - -class GInferOutputs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def at(self, *args, **kwargs): ... # incomplete - - -class GMat: - def __init__(self) -> None: ... 
- - -class GMatDesc: - chan: Incomplete - depth: Incomplete - dims: Incomplete - planar: Incomplete - size: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def asInterleaved(self, *args, **kwargs): ... # incomplete - def asPlanar(self, *args, **kwargs): ... # incomplete - def withDepth(self, *args, **kwargs): ... # incomplete - def withSize(self, *args, **kwargs): ... # incomplete - def withSizeDelta(self, *args, **kwargs): ... # incomplete - def withType(self, *args, **kwargs): ... # incomplete - - -class GOpaqueDesc: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GOpaqueT: - def __init__(self, type: int) -> None: ... - def type(self) -> int: ... - - -class GScalar: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GScalarDesc: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class GStreamingCompiled: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def pull(self, *args, **kwargs): ... # incomplete - def running(self, *args, **kwargs): ... # incomplete - def setSource(self, callback) -> None: ... - def start(self) -> None: ... - def stop(self) -> None: ... - - -class GeneralizedHough(Algorithm): - def detect(self, *args, **kwargs): ... # incomplete - def getCannyHighThresh(self, *args, **kwargs): ... # incomplete - def getCannyLowThresh(self, *args, **kwargs): ... # incomplete - def getDp(self, *args, **kwargs): ... # incomplete - def getMaxBufferSize(self, *args, **kwargs): ... # incomplete - def getMinDist(self, *args, **kwargs): ... # incomplete - def setCannyHighThresh(self, cannyHighThresh) -> None: ... - def setCannyLowThresh(self, cannyLowThresh) -> None: ... - def setDp(self, dp) -> None: ... - def setMaxBufferSize(self, maxBufferSize) -> None: ... - def setMinDist(self, minDist) -> None: ... - def setTemplate(self, *args, **kwargs): ... # incomplete - - -class GeneralizedHoughBallard(GeneralizedHough): - def getLevels(self, *args, **kwargs): ... # incomplete - def getVotesThreshold(self, *args, **kwargs): ... # incomplete - def setLevels(self, levels) -> None: ... - def setVotesThreshold(self, votesThreshold) -> None: ... - - -class GeneralizedHoughGuil(GeneralizedHough): - def getAngleEpsilon(self, *args, **kwargs): ... # incomplete - def getAngleStep(self, *args, **kwargs): ... # incomplete - def getAngleThresh(self, *args, **kwargs): ... # incomplete - def getLevels(self, *args, **kwargs): ... # incomplete - def getMaxAngle(self, *args, **kwargs): ... # incomplete - def getMaxScale(self, *args, **kwargs): ... # incomplete - def getMinAngle(self, *args, **kwargs): ... # incomplete - def getMinScale(self, *args, **kwargs): ... # incomplete - def getPosThresh(self, *args, **kwargs): ... # incomplete - def getScaleStep(self, *args, **kwargs): ... # incomplete - def getScaleThresh(self, *args, **kwargs): ... # incomplete - def getXi(self, *args, **kwargs): ... # incomplete - def setAngleEpsilon(self, angleEpsilon) -> None: ... - def setAngleStep(self, angleStep) -> None: ... - def setAngleThresh(self, angleThresh) -> None: ... - def setLevels(self, levels) -> None: ... - def setMaxAngle(self, maxAngle) -> None: ... - def setMaxScale(self, maxScale) -> None: ... - def setMinAngle(self, minAngle) -> None: ... - def setMinScale(self, minScale) -> None: ... - def setPosThresh(self, posThresh) -> None: ... - def setScaleStep(self, scaleStep) -> None: ... - def setScaleThresh(self, scaleThresh) -> None: ... - def setXi(self, xi) -> None: ... 
- - -class HOGDescriptor: - L2HysThreshold: Incomplete - blockSize: Incomplete - blockStride: Incomplete - cellSize: Incomplete - derivAperture: Incomplete - gammaCorrection: Incomplete - histogramNormType: Incomplete - nbins: Incomplete - nlevels: Incomplete - signedGradient: Incomplete - svmDetector: Incomplete - winSigma: Incomplete - winSize: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def checkDetectorSize(self, *args, **kwargs): ... # incomplete - def compute(self, *args, **kwargs): ... # incomplete - def computeGradient(self, *args, **kwargs): ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def detectMultiScale(self, *args, **kwargs): ... # incomplete - def getDaimlerPeopleDetector(self, *args, **kwargs): ... # incomplete - def getDefaultPeopleDetector(self, *args, **kwargs): ... # incomplete - def getDescriptorSize(self, *args, **kwargs): ... # incomplete - def getWinSigma(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def save(self, *args, **kwargs): ... # incomplete - def setSVMDetector(self, svmdetector) -> None: ... - - -class KAZE(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getDiffusivity(self, *args, **kwargs): ... # incomplete - def getExtended(self, *args, **kwargs): ... # incomplete - def getNOctaveLayers(self, *args, **kwargs): ... # incomplete - def getNOctaves(self, *args, **kwargs): ... # incomplete - def getThreshold(self, *args, **kwargs): ... # incomplete - def getUpright(self, *args, **kwargs): ... # incomplete - def setDiffusivity(self, diff) -> None: ... - def setExtended(self, extended) -> None: ... - def setNOctaveLayers(self, octaveLayers) -> None: ... - def setNOctaves(self, octaves) -> None: ... - def setThreshold(self, threshold) -> None: ... - def setUpright(self, upright) -> None: ... - - -class KalmanFilter: - controlMatrix: Incomplete - errorCovPost: Incomplete - errorCovPre: Incomplete - gain: Incomplete - measurementMatrix: Incomplete - measurementNoiseCov: Incomplete - processNoiseCov: Incomplete - statePost: Incomplete - statePre: Incomplete - transitionMatrix: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def correct(self, *args, **kwargs): ... # incomplete - def predict(self, *args, **kwargs): ... # incomplete - - -class KeyPoint: - angle: Incomplete - class_id: Incomplete - octave: Incomplete - pt: Incomplete - response: Incomplete - size: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def convert(self, *args, **kwargs): ... # incomplete - def overlap(self, *args, **kwargs): ... # incomplete - - -class LineSegmentDetector(Algorithm): - def compareSegments(self, *args, **kwargs): ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def drawSegments(self, image, lines) -> _image: ... - - -class MSER(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def detectRegions(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getDelta(self, *args, **kwargs): ... # incomplete - def getMaxArea(self, *args, **kwargs): ... # incomplete - def getMinArea(self, *args, **kwargs): ... # incomplete - def getPass2Only(self, *args, **kwargs): ... 
# incomplete - def setDelta(self, delta) -> None: ... - def setMaxArea(self, maxArea) -> None: ... - def setMinArea(self, minArea) -> None: ... - def setPass2Only(self, f) -> None: ... - - -class MergeDebevec(MergeExposures): - def process(self, *args, **kwargs): ... # incomplete - - -class MergeExposures(Algorithm): - def process(self, *args, **kwargs): ... # incomplete - - -class MergeMertens(MergeExposures): - def getContrastWeight(self, *args, **kwargs): ... # incomplete - def getExposureWeight(self, *args, **kwargs): ... # incomplete - def getSaturationWeight(self, *args, **kwargs): ... # incomplete - def process(self, *args, **kwargs): ... # incomplete - def setContrastWeight(self, contrast_weiht) -> None: ... - def setExposureWeight(self, exposure_weight) -> None: ... - def setSaturationWeight(self, saturation_weight) -> None: ... - - -class MergeRobertson(MergeExposures): - def process(self, *args, **kwargs): ... # incomplete - - -class ORB(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - def getEdgeThreshold(self, *args, **kwargs): ... # incomplete - def getFastThreshold(self, *args, **kwargs): ... # incomplete - def getFirstLevel(self, *args, **kwargs): ... # incomplete - def getMaxFeatures(self, *args, **kwargs): ... # incomplete - def getNLevels(self, *args, **kwargs): ... # incomplete - def getPatchSize(self, *args, **kwargs): ... # incomplete - def getScaleFactor(self, *args, **kwargs): ... # incomplete - def getScoreType(self, *args, **kwargs): ... # incomplete - def getWTA_K(self, *args, **kwargs): ... # incomplete - def setEdgeThreshold(self, edgeThreshold) -> None: ... - def setFastThreshold(self, fastThreshold) -> None: ... - def setFirstLevel(self, firstLevel) -> None: ... - def setMaxFeatures(self, maxFeatures) -> None: ... - def setNLevels(self, nlevels) -> None: ... - def setPatchSize(self, patchSize) -> None: ... - def setScaleFactor(self, scaleFactor) -> None: ... - def setScoreType(self, scoreType) -> None: ... - def setWTA_K(self, wta_k) -> None: ... - - -class PyRotationWarper: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def buildMaps(self, *args, **kwargs): ... # incomplete - def getScale(self, *args, **kwargs): ... # incomplete - def setScale(self, arg1) -> None: ... - def warp(self, *args, **kwargs): ... # incomplete - def warpBackward(self, *args, **kwargs): ... # incomplete - def warpPoint(self, *args, **kwargs): ... # incomplete - def warpPointBackward(self, *args, **kwargs): ... # incomplete - def warpRoi(self, *args, **kwargs): ... # incomplete - - -class QRCodeDetector: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def decode(self, *args, **kwargs): ... # incomplete - def decodeCurved(self, *args, **kwargs): ... # incomplete - def decodeMulti(self, *args, **kwargs): ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def detectAndDecode(self, *args, **kwargs): ... # incomplete - def detectAndDecodeCurved(self, *args, **kwargs): ... # incomplete - def detectAndDecodeMulti(self, *args, **kwargs): ... # incomplete - def detectMulti(self, *args, **kwargs): ... # incomplete - def setEpsX(self, epsX) -> None: ... - def setEpsY(self, epsY) -> None: ... - - -class QRCodeEncoder: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def encode(self, *args, **kwargs): ... 
# incomplete - def encodeStructuredAppend(self, *args, **kwargs): ... # incomplete - - -class QRCodeEncoder_Params: - correction_level: Incomplete - mode: Incomplete - structure_number: Incomplete - version: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class SIFT(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - - -class SimpleBlobDetector(Feature2D): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getDefaultName(self, *args, **kwargs): ... # incomplete - - -class SimpleBlobDetector_Params: - blobColor: Incomplete - filterByArea: Incomplete - filterByCircularity: Incomplete - filterByColor: Incomplete - filterByConvexity: Incomplete - filterByInertia: Incomplete - maxArea: Incomplete - maxCircularity: Incomplete - maxConvexity: Incomplete - maxInertiaRatio: Incomplete - maxThreshold: Incomplete - minArea: Incomplete - minCircularity: Incomplete - minConvexity: Incomplete - minDistBetweenBlobs: Incomplete - minInertiaRatio: Incomplete - minRepeatability: Incomplete - minThreshold: Incomplete - thresholdStep: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class SparseOpticalFlow(Algorithm): - def calc(self, *args, **kwargs): ... # incomplete - - -class SparsePyrLKOpticalFlow(SparseOpticalFlow): - def create(self, *args, **kwargs): ... # incomplete - def getFlags(self, *args, **kwargs): ... # incomplete - def getMaxLevel(self, *args, **kwargs): ... # incomplete - def getMinEigThreshold(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getWinSize(self, *args, **kwargs): ... # incomplete - def setFlags(self, flags: int | None) -> None: ... - def setMaxLevel(self, maxLevel) -> None: ... - def setMinEigThreshold(self, minEigThreshold) -> None: ... - def setTermCriteria(self, crit) -> None: ... - def setWinSize(self, winSize) -> None: ... - - -class StereoBM(StereoMatcher): - def create(self, *args, **kwargs): ... # incomplete - def getPreFilterCap(self, *args, **kwargs): ... # incomplete - def getPreFilterSize(self, *args, **kwargs): ... # incomplete - def getPreFilterType(self, *args, **kwargs): ... # incomplete - def getROI1(self, *args, **kwargs): ... # incomplete - def getROI2(self, *args, **kwargs): ... # incomplete - def getSmallerBlockSize(self, *args, **kwargs): ... # incomplete - def getTextureThreshold(self, *args, **kwargs): ... # incomplete - def getUniquenessRatio(self, *args, **kwargs): ... # incomplete - def setPreFilterCap(self, preFilterCap) -> None: ... - def setPreFilterSize(self, preFilterSize) -> None: ... - def setPreFilterType(self, preFilterType) -> None: ... - def setROI1(self, roi1) -> None: ... - def setROI2(self, roi2) -> None: ... - def setSmallerBlockSize(self, blockSize) -> None: ... - def setTextureThreshold(self, textureThreshold) -> None: ... - def setUniquenessRatio(self, uniquenessRatio) -> None: ... - - -class StereoMatcher(Algorithm): - def compute(self, *args, **kwargs): ... # incomplete - def getBlockSize(self, *args, **kwargs): ... # incomplete - def getDisp12MaxDiff(self, *args, **kwargs): ... # incomplete - def getMinDisparity(self, *args, **kwargs): ... # incomplete - def getNumDisparities(self, *args, **kwargs): ... # incomplete - def getSpeckleRange(self, *args, **kwargs): ... 
# incomplete - def getSpeckleWindowSize(self, *args, **kwargs): ... # incomplete - def setBlockSize(self, blockSize) -> None: ... - def setDisp12MaxDiff(self, disp12MaxDiff) -> None: ... - def setMinDisparity(self, minDisparity) -> None: ... - def setNumDisparities(self, numDisparities) -> None: ... - def setSpeckleRange(self, speckleRange) -> None: ... - def setSpeckleWindowSize(self, speckleWindowSize) -> None: ... - - -class StereoSGBM(StereoMatcher): - def create(self, *args, **kwargs): ... # incomplete - def getMode(self, *args, **kwargs): ... # incomplete - def getP1(self, *args, **kwargs): ... # incomplete - def getP2(self, *args, **kwargs): ... # incomplete - def getPreFilterCap(self, *args, **kwargs): ... # incomplete - def getUniquenessRatio(self, *args, **kwargs): ... # incomplete - def setMode(self, mode) -> None: ... - def setP1(self, P1) -> None: ... - def setP2(self, P2) -> None: ... - def setPreFilterCap(self, preFilterCap) -> None: ... - def setUniquenessRatio(self, uniquenessRatio) -> None: ... - - -class Stitcher: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def composePanorama(self, *args, **kwargs): ... # incomplete - def compositingResol(self, *args, **kwargs): ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def estimateTransform(self, *args, **kwargs): ... # incomplete - def interpolationFlags(self, *args, **kwargs): ... # incomplete - def panoConfidenceThresh(self, *args, **kwargs): ... # incomplete - def registrationResol(self, *args, **kwargs): ... # incomplete - def seamEstimationResol(self, *args, **kwargs): ... # incomplete - def setCompositingResol(self, resol_mpx) -> None: ... - def setInterpolationFlags(self, interp_flags: int | None) -> None: ... - def setPanoConfidenceThresh(self, conf_thresh) -> None: ... - def setRegistrationResol(self, resol_mpx) -> None: ... - def setSeamEstimationResol(self, resol_mpx) -> None: ... - def setWaveCorrection(self, flag) -> None: ... - def stitch(self, *args, **kwargs): ... # incomplete - def waveCorrection(self, *args, **kwargs): ... # incomplete - def workScale(self, *args, **kwargs): ... # incomplete - - -class Subdiv2D: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def edgeDst(self, *args, **kwargs): ... # incomplete - def edgeOrg(self, *args, **kwargs): ... # incomplete - def findNearest(self, *args, **kwargs): ... # incomplete - def getEdge(self, *args, **kwargs): ... # incomplete - def getEdgeList(self) -> _edgeList: ... - def getLeadingEdgeList(self) -> _leadingEdgeList: ... - def getTriangleList(self) -> _triangleList: ... - def getVertex(self, *args, **kwargs): ... # incomplete - def getVoronoiFacetList(self, *args, **kwargs): ... # incomplete - def initDelaunay(self, rect) -> None: ... - def insert(self, ptvec) -> None: ... - def locate(self, *args, **kwargs): ... # incomplete - def nextEdge(self, *args, **kwargs): ... # incomplete - def rotateEdge(self, *args, **kwargs): ... # incomplete - def symEdge(self, *args, **kwargs): ... # incomplete - - -class TickMeter: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getAvgTimeMilli(self, *args, **kwargs): ... # incomplete - def getAvgTimeSec(self, *args, **kwargs): ... # incomplete - def getCounter(self, *args, **kwargs): ... # incomplete - def getFPS(self, *args, **kwargs): ... # incomplete - def getTimeMicro(self, *args, **kwargs): ... # incomplete - def getTimeMilli(self, *args, **kwargs): ... # incomplete - def getTimeSec(self, *args, **kwargs): ... 
# incomplete
-    def getTimeTicks(self, *args, **kwargs): ...  # incomplete
-    def reset(self) -> None: ...
-    def start(self) -> None: ...
-    def stop(self) -> None: ...
-
-
-class Tonemap(Algorithm):
-    def getGamma(self, *args, **kwargs): ...  # incomplete
-    def process(self, *args, **kwargs): ...  # incomplete
-    def setGamma(self, gamma) -> None: ...
-
-
-class TonemapDrago(Tonemap):
-    def getBias(self, *args, **kwargs): ...  # incomplete
-    def getSaturation(self, *args, **kwargs): ...  # incomplete
-    def setBias(self, bias) -> None: ...
-    def setSaturation(self, saturation) -> None: ...
-
-
-class TonemapMantiuk(Tonemap):
-    def getSaturation(self, *args, **kwargs): ...  # incomplete
-    def getScale(self, *args, **kwargs): ...  # incomplete
-    def setSaturation(self, saturation) -> None: ...
-    def setScale(self, scale) -> None: ...
-
-
-class TonemapReinhard(Tonemap):
-    def getColorAdaptation(self, *args, **kwargs): ...  # incomplete
-    def getIntensity(self, *args, **kwargs): ...  # incomplete
-    def getLightAdaptation(self, *args, **kwargs): ...  # incomplete
-    def setColorAdaptation(self, color_adapt) -> None: ...
-    def setIntensity(self, intensity) -> None: ...
-    def setLightAdaptation(self, light_adapt) -> None: ...
-
-
-class Tracker:
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-    def init(self, image, boundingBox) -> None: ...
-    def update(self, *args, **kwargs): ...  # incomplete
-
-
-class TrackerDaSiamRPN(Tracker):
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-    def create(self, *args, **kwargs): ...  # incomplete
-    def getTrackingScore(self, *args, **kwargs): ...  # incomplete
-
-
-class TrackerDaSiamRPN_Params:
-    backend: Incomplete
-    kernel_cls1: Incomplete
-    kernel_r1: Incomplete
-    model: Incomplete
-    target: Incomplete
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-
-
-class TrackerGOTURN(Tracker):
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-    def create(self, *args, **kwargs): ...  # incomplete
-
-
-class TrackerGOTURN_Params:
-    modelBin: Incomplete
-    modelTxt: Incomplete
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-
-
-class TrackerMIL(Tracker):
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-    def create(self, *args, **kwargs): ...  # incomplete
-
-
-class TrackerMIL_Params:
-    featureSetNumFeatures: Incomplete
-    samplerInitInRadius: Incomplete
-    samplerInitMaxNegNum: Incomplete
-    samplerSearchWinSize: Incomplete
-    samplerTrackInRadius: Incomplete
-    samplerTrackMaxNegNum: Incomplete
-    samplerTrackMaxPosNum: Incomplete
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-
-
-class UMat:
-    offset: Incomplete
-    @overload
-    def __init__(self, usageFlags: int | None = ...) -> None: ...
-    @overload
-    def __init__(self, rows: int | None, cols: int | None, type: int | None, usageFlags: int | None = ...) -> None: ...
-    @overload
-    def __init__(self, size: _Size | None, type: int | None, usageFlags: int | None = ...) -> None: ...
-
-    @overload
-    def __init__(
-        self, rows: int | None, cols: int | None, type: int | None, s: _Scalar, usageFlags: int | None = ...,
-    ) -> None: ...
-    @overload
-    def __init__(self, size: _Size | None, type: int | None, s: _Scalar, usageFlags: int | None = ...) -> None: ...
-    @overload
-    def __init__(self, m: _UMat) -> None: ...
-    @overload
-    def __init__(self, m: _UMat, rowRange: _Range | None, colRange: _Range | None = ...) -> None: ...
-    @overload
-    def __init__(self, m: _UMat, roi: _Rect | None) -> None: ...
-    @overload
-    def __init__(self, m: _UMat, ranges: Sequence[_Range | None] | None) -> None: ...
-    @staticmethod
-    def context(): ...
-    def get(self): ...
-    def handle(self, accessFlags): ...
-    def isContinuous(self): ...
-    def isSubmatrix(self): ...
-    @staticmethod
-    def queue(): ...
-
-
-class UsacParams:
-    confidence: Incomplete
-    isParallel: Incomplete
-    loIterations: Incomplete
-    loMethod: Incomplete
-    loSampleSize: Incomplete
-    maxIterations: Incomplete
-    neighborsSearch: Incomplete
-    randomGeneratorState: Incomplete
-    sampler: Incomplete
-    score: Incomplete
-    threshold: Incomplete
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-
-
-class VariationalRefinement(DenseOpticalFlow):
-    def calcUV(self, *args, **kwargs): ...  # incomplete
-    def create(self, *args, **kwargs): ...  # incomplete
-    def getAlpha(self, *args, **kwargs): ...  # incomplete
-    def getDelta(self, *args, **kwargs): ...  # incomplete
-    def getFixedPointIterations(self, *args, **kwargs): ...  # incomplete
-    def getGamma(self, *args, **kwargs): ...  # incomplete
-    def getOmega(self, *args, **kwargs): ...  # incomplete
-    def getSorIterations(self, *args, **kwargs): ...  # incomplete
-    def setAlpha(self, val) -> None: ...
-    def setDelta(self, val) -> None: ...
-    def setFixedPointIterations(self, val) -> None: ...
-    def setGamma(self, val) -> None: ...
-    def setOmega(self, val) -> None: ...
-    def setSorIterations(self, val) -> None: ...
-
-
-class VideoCapture:
-    @overload
-    def __init__(self) -> None: ...
-    @overload
-    def __init__(self, filename: str) -> None: ...
-    @overload
-    def __init__(self, filename: str, apiPreference: int | None, params: Sequence[int] = ...) -> None: ...
-    @overload
-    def __init__(self, index: int) -> None: ...
-    @overload
-    def __init__(self, index: int, apiPreference: int | None, params: Sequence[int] = ...) -> None: ...
-    def get(self, propId: int) -> float: ...
-    def getBackendName(self) -> str: ...
-    def getExceptionMode(self) -> bool: ...
-    def grab(self) -> bool: ...
-    def isOpened(self) -> bool: ...
-    @overload
-    def open(self, filename: str, apiPreference: int = ...) -> bool: ...
-    @overload
-    def open(self, filename: str, apiPreference: int, params: Sequence[int]) -> bool: ...
-    @overload
-    def open(self, index: int, apiPreference: int = ...) -> bool: ...
-    @overload
-    def open(self, index: int, apiPreference: int, params: Sequence[int]) -> bool: ...
-    @overload
-    def read(self, image: Mat | None = ...) -> tuple[bool, Mat]: ...
-    @overload
-    def read(self, image: _UMat) -> tuple[bool, UMat]: ...
-    def release(self) -> None: ...
-    @overload
-    def retrieve(self, image: Mat | None = ..., flag: int = ...) -> tuple[bool, Mat]: ...
-    @overload
-    def retrieve(self, image: _UMat, flag: int = ...) -> tuple[bool, UMat]: ...
-    def set(self, propId: int, value: float) -> bool: ...
-    def setExceptionMode(self, enable: bool) -> None: ...
-
-
-class VideoWriter:
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-    def fourcc(self, *args, **kwargs): ...  # incomplete
-    def get(self, *args, **kwargs): ...  # incomplete
-    def getBackendName(self, *args, **kwargs): ...  # incomplete
-    def isOpened(self, *args, **kwargs): ...  # incomplete
-    def open(self, *args, **kwargs): ...  # incomplete
-    def release(self) -> None: ...
-    def set(self, *args, **kwargs): ...  # incomplete
-    def write(self, image) -> None: ...
-
-
-class WarperCreator:
-    def __init__(self, *args, **kwargs) -> None: ...  # incomplete
-
-
-class cuda_BufferPool:
-    def __init__(self, *args, **kwargs) -> None: ...
# incomplete - def getAllocator(self, *args, **kwargs): ... # incomplete - def getBuffer(self, *args, **kwargs): ... # incomplete - - -class cuda_DeviceInfo: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def ECCEnabled(self, *args, **kwargs): ... # incomplete - def asyncEngineCount(self, *args, **kwargs): ... # incomplete - def canMapHostMemory(self, *args, **kwargs): ... # incomplete - def clockRate(self, *args, **kwargs): ... # incomplete - def computeMode(self, *args, **kwargs): ... # incomplete - def concurrentKernels(self, *args, **kwargs): ... # incomplete - def deviceID(self, *args, **kwargs): ... # incomplete - def freeMemory(self, *args, **kwargs): ... # incomplete - def integrated(self, *args, **kwargs): ... # incomplete - def isCompatible(self, *args, **kwargs): ... # incomplete - def kernelExecTimeoutEnabled(self, *args, **kwargs): ... # incomplete - def l2CacheSize(self, *args, **kwargs): ... # incomplete - def majorVersion(self, *args, **kwargs): ... # incomplete - def maxGridSize(self, *args, **kwargs): ... # incomplete - def maxSurface1D(self, *args, **kwargs): ... # incomplete - def maxSurface1DLayered(self, *args, **kwargs): ... # incomplete - def maxSurface2D(self, *args, **kwargs): ... # incomplete - def maxSurface2DLayered(self, *args, **kwargs): ... # incomplete - def maxSurface3D(self, *args, **kwargs): ... # incomplete - def maxSurfaceCubemap(self, *args, **kwargs): ... # incomplete - def maxSurfaceCubemapLayered(self, *args, **kwargs): ... # incomplete - def maxTexture1D(self, *args, **kwargs): ... # incomplete - def maxTexture1DLayered(self, *args, **kwargs): ... # incomplete - def maxTexture1DLinear(self, *args, **kwargs): ... # incomplete - def maxTexture1DMipmap(self, *args, **kwargs): ... # incomplete - def maxTexture2D(self, *args, **kwargs): ... # incomplete - def maxTexture2DGather(self, *args, **kwargs): ... # incomplete - def maxTexture2DLayered(self, *args, **kwargs): ... # incomplete - def maxTexture2DLinear(self, *args, **kwargs): ... # incomplete - def maxTexture2DMipmap(self, *args, **kwargs): ... # incomplete - def maxTexture3D(self, *args, **kwargs): ... # incomplete - def maxTextureCubemap(self, *args, **kwargs): ... # incomplete - def maxTextureCubemapLayered(self, *args, **kwargs): ... # incomplete - def maxThreadsDim(self, *args, **kwargs): ... # incomplete - def maxThreadsPerBlock(self, *args, **kwargs): ... # incomplete - def maxThreadsPerMultiProcessor(self, *args, **kwargs): ... # incomplete - def memPitch(self, *args, **kwargs): ... # incomplete - def memoryBusWidth(self, *args, **kwargs): ... # incomplete - def memoryClockRate(self, *args, **kwargs): ... # incomplete - def minorVersion(self, *args, **kwargs): ... # incomplete - def multiProcessorCount(self, *args, **kwargs): ... # incomplete - def pciBusID(self, *args, **kwargs): ... # incomplete - def pciDeviceID(self, *args, **kwargs): ... # incomplete - def pciDomainID(self, *args, **kwargs): ... # incomplete - def queryMemory(self, totalMemory, freeMemory) -> None: ... - def regsPerBlock(self, *args, **kwargs): ... # incomplete - def sharedMemPerBlock(self, *args, **kwargs): ... # incomplete - def surfaceAlignment(self, *args, **kwargs): ... # incomplete - def tccDriver(self, *args, **kwargs): ... # incomplete - def textureAlignment(self, *args, **kwargs): ... # incomplete - def texturePitchAlignment(self, *args, **kwargs): ... # incomplete - def totalConstMem(self, *args, **kwargs): ... # incomplete - def totalGlobalMem(self, *args, **kwargs): ... 
# incomplete - def totalMemory(self, *args, **kwargs): ... # incomplete - def unifiedAddressing(self, *args, **kwargs): ... # incomplete - def warpSize(self, *args, **kwargs): ... # incomplete - - -class cuda_Event: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def elapsedTime(self, *args, **kwargs): ... # incomplete - def queryIfComplete(self, *args, **kwargs): ... # incomplete - def record(self, *args, **kwargs): ... # incomplete - def waitForCompletion(self) -> None: ... - - -class cuda_GpuData: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class cuda_GpuMat: - step: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def adjustROI(self, *args, **kwargs): ... # incomplete - def assignTo(self, *args, **kwargs): ... # incomplete - def channels(self, *args, **kwargs): ... # incomplete - def clone(self, *args, **kwargs): ... # incomplete - def col(self, *args, **kwargs): ... # incomplete - def colRange(self, *args, **kwargs): ... # incomplete - def convertTo(self, *args, **kwargs): ... # incomplete - def copyTo(self, *args, **kwargs): ... # incomplete - @overload - def create(self, rows, cols, type) -> None: ... - @overload - def create(self, size, type) -> None: ... - def cudaPtr(self, *args, **kwargs): ... # incomplete - def defaultAllocator(self, *args, **kwargs): ... # incomplete - def depth(self, *args, **kwargs): ... # incomplete - def download(self, *args, **kwargs): ... # incomplete - def elemSize(self, *args, **kwargs): ... # incomplete - def elemSize1(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def isContinuous(self, *args, **kwargs): ... # incomplete - def locateROI(self, wholeSize, ofs) -> None: ... - def reshape(self, *args, **kwargs): ... # incomplete - def row(self, *args, **kwargs): ... # incomplete - def rowRange(self, *args, **kwargs): ... # incomplete - def setDefaultAllocator(self, *args, **kwargs): ... # incomplete - def setTo(self, *args, **kwargs): ... # incomplete - def size(self, *args, **kwargs): ... # incomplete - def step1(self, *args, **kwargs): ... # incomplete - def swap(self, mat) -> None: ... - def type(self, *args, **kwargs): ... # incomplete - def updateContinuityFlag(self) -> None: ... - @overload - def upload(self, arr) -> None: ... - @overload - def upload(self, arr, stream) -> None: ... - - -class cuda_GpuMatND: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class cuda_GpuMat_Allocator: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class cuda_HostMem: - step: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def channels(self, *args, **kwargs): ... # incomplete - def clone(self, *args, **kwargs): ... # incomplete - def create(self, rows, cols, type) -> None: ... - def createMatHeader(self, *args, **kwargs): ... # incomplete - def depth(self, *args, **kwargs): ... # incomplete - def elemSize(self, *args, **kwargs): ... # incomplete - def elemSize1(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def isContinuous(self, *args, **kwargs): ... # incomplete - def reshape(self, *args, **kwargs): ... # incomplete - def size(self, *args, **kwargs): ... # incomplete - def step1(self, *args, **kwargs): ... # incomplete - def swap(self, b) -> None: ... - def type(self, *args, **kwargs): ... # incomplete - - -class cuda_Stream: - def __init__(self, *args, **kwargs) -> None: ... 
# incomplete - @staticmethod - def Null() -> cuda_Stream: ... - def cudaPtr(self, *args, **kwargs): ... # incomplete - def queryIfComplete(self, *args, **kwargs): ... # incomplete - def waitEvent(self, event) -> None: ... - def waitForCompletion(self) -> None: ... - - -class cuda_TargetArchs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def has(self, *args, **kwargs): ... # incomplete - def hasBin(self, *args, **kwargs): ... # incomplete - def hasEqualOrGreater(self, *args, **kwargs): ... # incomplete - def hasEqualOrGreaterBin(self, *args, **kwargs): ... # incomplete - def hasEqualOrGreaterPtx(self, *args, **kwargs): ... # incomplete - def hasEqualOrLessPtx(self, *args, **kwargs): ... # incomplete - def hasPtx(self, *args, **kwargs): ... # incomplete - - -class detail_AffineBasedEstimator(detail_Estimator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_AffineBestOf2NearestMatcher(detail_BestOf2NearestMatcher): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_BestOf2NearestMatcher(detail_FeaturesMatcher): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def collectGarbage(self) -> None: ... - def create(self, *args, **kwargs): ... # incomplete - - -class detail_BestOf2NearestRangeMatcher(detail_BestOf2NearestMatcher): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_Blender: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def blend(self, *args, **kwargs): ... # incomplete - def createDefault(self, *args, **kwargs): ... # incomplete - def feed(self, img, mask, tl) -> None: ... - @overload - def prepare(self, corners, sizes) -> None: ... - @overload - def prepare(self, dst_roi) -> None: ... - - -class detail_BlocksChannelsCompensator(detail_BlocksCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_BlocksCompensator(detail_ExposureCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, index, corner, image, mask) -> _image: ... - def getBlockSize(self, *args, **kwargs): ... # incomplete - def getMatGains(self, *args, **kwargs): ... # incomplete - def getNrFeeds(self, *args, **kwargs): ... # incomplete - def getNrGainsFilteringIterations(self, *args, **kwargs): ... # incomplete - def getSimilarityThreshold(self, *args, **kwargs): ... # incomplete - @overload - def setBlockSize(self, width, height) -> None: ... - @overload - def setBlockSize(self, size) -> None: ... - def setMatGains(self, umv) -> None: ... - def setNrFeeds(self, nr_feeds) -> None: ... - def setNrGainsFilteringIterations(self, nr_iterations) -> None: ... - def setSimilarityThreshold(self, similarity_threshold) -> None: ... - - -class detail_BlocksGainCompensator(detail_BlocksCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, index, corner, image, mask) -> _image: ... - def getMatGains(self, *args, **kwargs): ... # incomplete - def setMatGains(self, umv) -> None: ... - - -class detail_BundleAdjusterAffine(detail_BundleAdjusterBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_BundleAdjusterAffinePartial(detail_BundleAdjusterBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_BundleAdjusterBase(detail_Estimator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def confThresh(self, *args, **kwargs): ... 
# incomplete - def refinementMask(self, *args, **kwargs): ... # incomplete - def setConfThresh(self, conf_thresh) -> None: ... - def setRefinementMask(self, mask) -> None: ... - def setTermCriteria(self, term_criteria) -> None: ... - def termCriteria(self, *args, **kwargs): ... # incomplete - - -class detail_BundleAdjusterRay(detail_BundleAdjusterBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_BundleAdjusterReproj(detail_BundleAdjusterBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_CameraParams: - R: Incomplete - aspect: Incomplete - focal: Incomplete - ppx: Incomplete - ppy: Incomplete - t: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def K(self, *args, **kwargs): ... # incomplete - - -class detail_ChannelsCompensator(detail_ExposureCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, index, corner, image, mask) -> _image: ... - def getMatGains(self, *args, **kwargs): ... # incomplete - def getNrFeeds(self, *args, **kwargs): ... # incomplete - def getSimilarityThreshold(self, *args, **kwargs): ... # incomplete - def setMatGains(self, umv) -> None: ... - def setNrFeeds(self, nr_feeds) -> None: ... - def setSimilarityThreshold(self, similarity_threshold) -> None: ... - - -class detail_DpSeamFinder(detail_SeamFinder): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def setCostFunction(self, val) -> None: ... - - -class detail_Estimator: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, *args, **kwargs): ... # incomplete - - -class detail_ExposureCompensator: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, index, corner, image, mask) -> _image: ... - def createDefault(self, *args, **kwargs): ... # incomplete - def feed(self, corners, images, masks) -> None: ... - def getMatGains(self, *args, **kwargs): ... # incomplete - def getUpdateGain(self, *args, **kwargs): ... # incomplete - def setMatGains(self, arg1) -> None: ... - def setUpdateGain(self, b) -> None: ... - - -class detail_FeatherBlender(detail_Blender): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def blend(self, *args, **kwargs): ... # incomplete - def createWeightMaps(self, *args, **kwargs): ... # incomplete - def feed(self, img, mask, tl) -> None: ... - def prepare(self, dst_roi) -> None: ... # type: ignore[override] - def setSharpness(self, val) -> None: ... - def sharpness(self, *args, **kwargs): ... # incomplete - - -class detail_FeaturesMatcher: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, features1, features2) -> _matches_info: ... - def apply2(self, *args, **kwargs): ... # incomplete - def collectGarbage(self) -> None: ... - def isThreadSafe(self, *args, **kwargs): ... # incomplete - - -class detail_GainCompensator(detail_ExposureCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, index, corner, image, mask) -> _image: ... - def getMatGains(self, *args, **kwargs): ... # incomplete - def getNrFeeds(self, *args, **kwargs): ... # incomplete - def getSimilarityThreshold(self, *args, **kwargs): ... # incomplete - def setMatGains(self, umv) -> None: ... - def setNrFeeds(self, nr_feeds) -> None: ... - def setSimilarityThreshold(self, similarity_threshold) -> None: ... - - -class detail_GraphCutSeamFinder: - def __init__(self, *args, **kwargs) -> None: ... 
# incomplete - def find(self, src, corners, masks) -> None: ... - - -class detail_HomographyBasedEstimator(detail_Estimator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_ImageFeatures: - descriptors: Incomplete - img_idx: Incomplete - img_size: Incomplete - keypoints: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getKeypoints(self, *args, **kwargs): ... # incomplete - - -class detail_MatchesInfo: - H: Incomplete - confidence: Incomplete - dst_img_idx: Incomplete - num_inliers: Incomplete - src_img_idx: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getInliers(self, *args, **kwargs): ... # incomplete - def getMatches(self, *args, **kwargs): ... # incomplete - - -class detail_MultiBandBlender(detail_Blender): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def blend(self, *args, **kwargs): ... # incomplete - def feed(self, img, mask, tl) -> None: ... - def numBands(self, *args, **kwargs): ... # incomplete - def prepare(self, dst_roi) -> None: ... # type: ignore[override] - def setNumBands(self, val) -> None: ... - - -class detail_NoBundleAdjuster(detail_BundleAdjusterBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_NoExposureCompensator(detail_ExposureCompensator): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def apply(self, arg1, arg2, arg3, arg4) -> _arg3: ... - def getMatGains(self, *args, **kwargs): ... # incomplete - def setMatGains(self, umv) -> None: ... - - -class detail_NoSeamFinder(detail_SeamFinder): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def find(self, arg1, arg2, arg3) -> _arg3: ... - - -class detail_PairwiseSeamFinder(detail_SeamFinder): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def find(self, src, corners, masks) -> _masks: ... - - -class detail_ProjectorBase: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_SeamFinder: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def createDefault(self, *args, **kwargs): ... # incomplete - def find(self, src, corners, masks) -> _masks: ... - - -class detail_SphericalProjector(detail_ProjectorBase): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def mapBackward(self, u, v, x, y) -> None: ... - def mapForward(self, x, y, u, v) -> None: ... - - -class detail_Timelapser: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def createDefault(self, *args, **kwargs): ... # incomplete - def getDst(self, *args, **kwargs): ... # incomplete - def initialize(self, corners, sizes) -> None: ... - def process(self, img, mask, tl) -> None: ... - - -class detail_TimelapserCrop(detail_Timelapser): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class detail_VoronoiSeamFinder(detail_PairwiseSeamFinder): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def find(self, src, corners, masks) -> _masks: ... - - -class dnn_ClassificationModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def classify(self, *args, **kwargs): ... # incomplete - - -class dnn_DetectionModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def detect(self, *args, **kwargs): ... # incomplete - def getNmsAcrossClasses(self, *args, **kwargs): ... # incomplete - def setNmsAcrossClasses(self, *args, **kwargs): ... 
# incomplete - - -class dnn_DictValue: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getIntValue(self, *args, **kwargs): ... # incomplete - def getRealValue(self, *args, **kwargs): ... # incomplete - def getStringValue(self, *args, **kwargs): ... # incomplete - def isInt(self, *args, **kwargs): ... # incomplete - def isReal(self, *args, **kwargs): ... # incomplete - def isString(self, *args, **kwargs): ... # incomplete - - -class dnn_KeypointsModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def estimate(self, *args, **kwargs): ... # incomplete - - -class dnn_Layer(Algorithm): - blobs: Incomplete - name: Incomplete - preferableTarget: Incomplete - type: Incomplete - def finalize(self, *args, **kwargs): ... # incomplete - def outputNameToIndex(self, *args, **kwargs): ... # incomplete - def run(self, *args, **kwargs): ... # incomplete - - -class dnn_Model: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def predict(self, *args, **kwargs): ... # incomplete - def setInputCrop(self, *args, **kwargs): ... # incomplete - def setInputMean(self, *args, **kwargs): ... # incomplete - def setInputParams(self, *args, **kwargs): ... # incomplete - def setInputScale(self, *args, **kwargs): ... # incomplete - def setInputSize(self, *args, **kwargs): ... # incomplete - def setInputSwapRB(self, *args, **kwargs): ... # incomplete - def setPreferableBackend(self, *args, **kwargs): ... # incomplete - def setPreferableTarget(self, *args, **kwargs): ... # incomplete - - -class dnn_Net: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def connect(self, outPin, inpPin) -> None: ... - def dump(self, *args, **kwargs): ... # incomplete - def dumpToFile(self, path) -> None: ... - def empty(self, *args, **kwargs): ... # incomplete - def enableFusion(self, fusion) -> None: ... - def forward(self, *args, **kwargs): ... # incomplete - def forwardAndRetrieve(self, outBlobNames) -> _outputBlobs: ... - def forwardAsync(self, *args, **kwargs): ... # incomplete - def getFLOPS(self, *args, **kwargs): ... # incomplete - def getInputDetails(self, *args, **kwargs): ... # incomplete - def getLayer(self, *args, **kwargs): ... # incomplete - def getLayerId(self, *args, **kwargs): ... # incomplete - def getLayerNames(self, *args, **kwargs): ... # incomplete - def getLayerTypes(self) -> _layersTypes: ... - def getLayersCount(self, *args, **kwargs): ... # incomplete - def getLayersShapes(self, *args, **kwargs): ... # incomplete - def getMemoryConsumption(self, *args, **kwargs): ... # incomplete - def getOutputDetails(self, *args, **kwargs): ... # incomplete - def getParam(self, *args, **kwargs): ... # incomplete - def getPerfProfile(self, *args, **kwargs): ... # incomplete - def getUnconnectedOutLayers(self, *args, **kwargs): ... # incomplete - def getUnconnectedOutLayersNames(self, *args, **kwargs): ... # incomplete - def quantize(self, *args, **kwargs): ... # incomplete - def readFromModelOptimizer(self, *args, **kwargs): ... # incomplete - def setHalideScheduler(self, scheduler) -> None: ... - def setInput(self, *args, **kwargs): ... # incomplete - def setInputShape(self, inputName, shape) -> None: ... - def setInputsNames(self, inputBlobNames) -> None: ... - def setParam(self, layer, numParam, blob) -> None: ... - def setPreferableBackend(self, backendId) -> None: ... - def setPreferableTarget(self, targetId) -> None: ... - - -class dnn_SegmentationModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... 
# incomplete - def segment(self, *args, **kwargs): ... # incomplete - - -class dnn_TextDetectionModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def detect(self, frame) -> _detections: ... - def detectTextRectangles(self, frame) -> _detections: ... - - -class dnn_TextDetectionModel_DB(dnn_TextDetectionModel): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getBinaryThreshold(self, *args, **kwargs): ... # incomplete - def getMaxCandidates(self, *args, **kwargs): ... # incomplete - def getPolygonThreshold(self, *args, **kwargs): ... # incomplete - def getUnclipRatio(self, *args, **kwargs): ... # incomplete - def setBinaryThreshold(self, *args, **kwargs): ... # incomplete - def setMaxCandidates(self, *args, **kwargs): ... # incomplete - def setPolygonThreshold(self, *args, **kwargs): ... # incomplete - def setUnclipRatio(self, *args, **kwargs): ... # incomplete - - -class dnn_TextDetectionModel_EAST(dnn_TextDetectionModel): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getConfidenceThreshold(self, *args, **kwargs): ... # incomplete - def getNMSThreshold(self, *args, **kwargs): ... # incomplete - def setConfidenceThreshold(self, *args, **kwargs): ... # incomplete - def setNMSThreshold(self, *args, **kwargs): ... # incomplete - - -class dnn_TextRecognitionModel(dnn_Model): - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getDecodeType(self, *args, **kwargs): ... # incomplete - def getVocabulary(self, *args, **kwargs): ... # incomplete - def recognize(self, frame, roiRects) -> _results: ... - def setDecodeOptsCTCPrefixBeamSearch(self, *args, **kwargs): ... # incomplete - def setDecodeType(self, *args, **kwargs): ... # incomplete - def setVocabulary(self, *args, **kwargs): ... # incomplete - - -class error(Exception): - code: ClassVar[int] - err: ClassVar[str] - file: ClassVar[str] - func: ClassVar[str] - line: ClassVar[int] - msg: ClassVar[str] - - -class flann_Index: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def build(self, *args, **kwargs): ... # incomplete - def getAlgorithm(self, *args, **kwargs): ... # incomplete - def getDistance(self, *args, **kwargs): ... # incomplete - def knnSearch(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def radiusSearch(self, *args, **kwargs): ... # incomplete - def release(self) -> None: ... - def save(self, filename) -> None: ... - - -class gapi_GKernelPackage: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_GNetPackage: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_GNetParam: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_ie_PyParams: - @overload - def __init__(self) -> None: ... - @overload - def __init__(self, tag: str, model: str, device: str) -> None: ... - @overload - def __init__(self, tag: str, model: str, weights: str, device: str) -> None: ... - def cfgBatchSize(self, size): ... - def cfgNumRequests(self, nireq): ... - def constInput(self, layer_name, data, hint=...): ... - - -class gapi_streaming_queue_capacity: - capacity: int - def __init__(self, cap: int = ...) -> None: ... - - -class gapi_wip_GOutputs: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def getGArray(self, *args, **kwargs): ... # incomplete - def getGMat(self, *args, **kwargs): ... # incomplete - def getGOpaque(self, *args, **kwargs): ... # incomplete - def getGScalar(self, *args, **kwargs): ... 
# incomplete - - -class gapi_wip_IStreamSource: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Circle: - center: Incomplete - color: Incomplete - lt: Incomplete - radius: Incomplete - shift: Incomplete - thick: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Image: - alpha: Incomplete - img: Incomplete - org: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Line: - color: Incomplete - lt: Incomplete - pt1: Incomplete - pt2: Incomplete - shift: Incomplete - thick: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Mosaic: - cellSz: Incomplete - decim: Incomplete - mos: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Poly: - color: Incomplete - lt: Incomplete - points: Incomplete - shift: Incomplete - thick: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Rect: - color: Incomplete - lt: Incomplete - rect: Incomplete - shift: Incomplete - thick: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class gapi_wip_draw_Text: - bottom_left_origin: bool - color: tuple[float, float, float, float] - ff: int - fs: float - lt: int - org: _Point - text: str - thick: int - def __init__(self, text_: str, org_: _Point, ff_: int, fs_: float, color_: _Scalar) -> None: ... - - -class ml_ANN_MLP(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getAnnealCoolingRatio(self, *args, **kwargs): ... # incomplete - def getAnnealFinalT(self, *args, **kwargs): ... # incomplete - def getAnnealInitialT(self, *args, **kwargs): ... # incomplete - def getAnnealItePerStep(self, *args, **kwargs): ... # incomplete - def getBackpropMomentumScale(self, *args, **kwargs): ... # incomplete - def getBackpropWeightScale(self, *args, **kwargs): ... # incomplete - def getLayerSizes(self, *args, **kwargs): ... # incomplete - def getRpropDW0(self, *args, **kwargs): ... # incomplete - def getRpropDWMax(self, *args, **kwargs): ... # incomplete - def getRpropDWMin(self, *args, **kwargs): ... # incomplete - def getRpropDWMinus(self, *args, **kwargs): ... # incomplete - def getRpropDWPlus(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getTrainMethod(self, *args, **kwargs): ... # incomplete - def getWeights(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setActivationFunction(self, *args, **kwargs): ... # incomplete - def setAnnealCoolingRatio(self, val) -> None: ... - def setAnnealFinalT(self, val) -> None: ... - def setAnnealInitialT(self, val) -> None: ... - def setAnnealItePerStep(self, val) -> None: ... - def setBackpropMomentumScale(self, val) -> None: ... - def setBackpropWeightScale(self, val) -> None: ... - def setLayerSizes(self, _layer_sizes) -> None: ... - def setRpropDW0(self, val) -> None: ... - def setRpropDWMax(self, val) -> None: ... - def setRpropDWMin(self, val) -> None: ... - def setRpropDWMinus(self, val) -> None: ... - def setRpropDWPlus(self, val) -> None: ... - def setTermCriteria(self, val) -> None: ... - def setTrainMethod(self, *args, **kwargs): ... # incomplete - - -class ml_Boost(ml_DTrees): - def create(self, *args, **kwargs): ... # incomplete - def getBoostType(self, *args, **kwargs): ... # incomplete - def getWeakCount(self, *args, **kwargs): ... 
# incomplete - def getWeightTrimRate(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setBoostType(self, val) -> None: ... - def setWeakCount(self, val) -> None: ... - def setWeightTrimRate(self, val) -> None: ... - - -class ml_DTrees(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getCVFolds(self, *args, **kwargs): ... # incomplete - def getMaxCategories(self, *args, **kwargs): ... # incomplete - def getMaxDepth(self, *args, **kwargs): ... # incomplete - def getMinSampleCount(self, *args, **kwargs): ... # incomplete - def getPriors(self, *args, **kwargs): ... # incomplete - def getRegressionAccuracy(self, *args, **kwargs): ... # incomplete - def getTruncatePrunedTree(self, *args, **kwargs): ... # incomplete - def getUse1SERule(self, *args, **kwargs): ... # incomplete - def getUseSurrogates(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setCVFolds(self, val) -> None: ... - def setMaxCategories(self, val) -> None: ... - def setMaxDepth(self, val) -> None: ... - def setMinSampleCount(self, val) -> None: ... - def setPriors(self, val) -> None: ... - def setRegressionAccuracy(self, val) -> None: ... - def setTruncatePrunedTree(self, val) -> None: ... - def setUse1SERule(self, val) -> None: ... - def setUseSurrogates(self, val) -> None: ... - - -class ml_EM(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getClustersNumber(self, *args, **kwargs): ... # incomplete - def getCovarianceMatrixType(self, *args, **kwargs): ... # incomplete - def getCovs(self, *args, **kwargs): ... # incomplete - def getMeans(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getWeights(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def predict(self, *args, **kwargs): ... # incomplete - def predict2(self, *args, **kwargs): ... # incomplete - def setClustersNumber(self, val) -> None: ... - def setCovarianceMatrixType(self, val) -> None: ... - def setTermCriteria(self, val) -> None: ... - def trainE(self, *args, **kwargs): ... # incomplete - def trainEM(self, *args, **kwargs): ... # incomplete - def trainM(self, *args, **kwargs): ... # incomplete - - -class ml_KNearest(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def findNearest(self, *args, **kwargs): ... # incomplete - def getAlgorithmType(self, *args, **kwargs): ... # incomplete - def getDefaultK(self, *args, **kwargs): ... # incomplete - def getEmax(self, *args, **kwargs): ... # incomplete - def getIsClassifier(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setAlgorithmType(self, val) -> None: ... - def setDefaultK(self, val) -> None: ... - def setEmax(self, val) -> None: ... - def setIsClassifier(self, val) -> None: ... - - -class ml_LogisticRegression(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getIterations(self, *args, **kwargs): ... # incomplete - def getLearningRate(self, *args, **kwargs): ... # incomplete - def getMiniBatchSize(self, *args, **kwargs): ... # incomplete - def getRegularization(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getTrainMethod(self, *args, **kwargs): ... # incomplete - def get_learnt_thetas(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... 
# incomplete - def predict(self, *args, **kwargs): ... # incomplete - def setIterations(self, val) -> None: ... - def setLearningRate(self, val) -> None: ... - def setMiniBatchSize(self, val) -> None: ... - def setRegularization(self, val) -> None: ... - def setTermCriteria(self, val) -> None: ... - def setTrainMethod(self, val) -> None: ... - - -class ml_NormalBayesClassifier(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def predictProb(self, *args, **kwargs): ... # incomplete - - -class ml_ParamGrid: - logStep: Incomplete - maxVal: Incomplete - minVal: Incomplete - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - - -class ml_RTrees(ml_DTrees): - def create(self, *args, **kwargs): ... # incomplete - def getActiveVarCount(self, *args, **kwargs): ... # incomplete - def getCalculateVarImportance(self, *args, **kwargs): ... # incomplete - def getOOBError(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getVarImportance(self, *args, **kwargs): ... # incomplete - def getVotes(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setActiveVarCount(self, val) -> None: ... - def setCalculateVarImportance(self, val) -> None: ... - def setTermCriteria(self, val) -> None: ... - - -class ml_SVM(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getC(self, *args, **kwargs): ... # incomplete - def getClassWeights(self, *args, **kwargs): ... # incomplete - def getCoef0(self, *args, **kwargs): ... # incomplete - def getDecisionFunction(self, *args, **kwargs): ... # incomplete - def getDefaultGridPtr(self, *args, **kwargs): ... # incomplete - def getDegree(self, *args, **kwargs): ... # incomplete - def getGamma(self, *args, **kwargs): ... # incomplete - def getKernelType(self, *args, **kwargs): ... # incomplete - def getNu(self, *args, **kwargs): ... # incomplete - def getP(self, *args, **kwargs): ... # incomplete - def getSupportVectors(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getType(self, *args, **kwargs): ... # incomplete - def getUncompressedSupportVectors(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... # incomplete - def setC(self, val) -> None: ... - def setClassWeights(self, val) -> None: ... - def setCoef0(self, val) -> None: ... - def setDegree(self, val) -> None: ... - def setGamma(self, val) -> None: ... - def setKernel(self, kernelType) -> None: ... - def setNu(self, val) -> None: ... - def setP(self, val) -> None: ... - def setTermCriteria(self, val) -> None: ... - def setType(self, val) -> None: ... - def trainAuto(self, *args, **kwargs): ... # incomplete - - -class ml_SVMSGD(ml_StatModel): - def create(self, *args, **kwargs): ... # incomplete - def getInitialStepSize(self, *args, **kwargs): ... # incomplete - def getMarginRegularization(self, *args, **kwargs): ... # incomplete - def getMarginType(self, *args, **kwargs): ... # incomplete - def getShift(self, *args, **kwargs): ... # incomplete - def getStepDecreasingPower(self, *args, **kwargs): ... # incomplete - def getSvmsgdType(self, *args, **kwargs): ... # incomplete - def getTermCriteria(self, *args, **kwargs): ... # incomplete - def getWeights(self, *args, **kwargs): ... # incomplete - def load(self, *args, **kwargs): ... 
# incomplete - def setInitialStepSize(self, InitialStepSize) -> None: ... - def setMarginRegularization(self, marginRegularization) -> None: ... - def setMarginType(self, marginType) -> None: ... - def setOptimalParameters(self, *args, **kwargs): ... # incomplete - def setStepDecreasingPower(self, stepDecreasingPower) -> None: ... - def setSvmsgdType(self, svmsgdType) -> None: ... - def setTermCriteria(self, val) -> None: ... - - -class ml_StatModel(Algorithm): - def calcError(self, *args, **kwargs): ... # incomplete - def empty(self, *args, **kwargs): ... # incomplete - def getVarCount(self, *args, **kwargs): ... # incomplete - def isClassifier(self, *args, **kwargs): ... # incomplete - def isTrained(self, *args, **kwargs): ... # incomplete - def predict(self, *args, **kwargs): ... # incomplete - def train(self, *args, **kwargs): ... # incomplete - - -class ml_TrainData: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def create(self, *args, **kwargs): ... # incomplete - def getCatCount(self, *args, **kwargs): ... # incomplete - def getCatMap(self, *args, **kwargs): ... # incomplete - def getCatOfs(self, *args, **kwargs): ... # incomplete - def getClassLabels(self, *args, **kwargs): ... # incomplete - def getDefaultSubstValues(self, *args, **kwargs): ... # incomplete - def getLayout(self, *args, **kwargs): ... # incomplete - def getMissing(self, *args, **kwargs): ... # incomplete - def getNAllVars(self, *args, **kwargs): ... # incomplete - def getNSamples(self, *args, **kwargs): ... # incomplete - def getNTestSamples(self, *args, **kwargs): ... # incomplete - def getNTrainSamples(self, *args, **kwargs): ... # incomplete - def getNVars(self, *args, **kwargs): ... # incomplete - def getNames(self, names) -> None: ... - def getNormCatResponses(self, *args, **kwargs): ... # incomplete - def getResponseType(self, *args, **kwargs): ... # incomplete - def getResponses(self, *args, **kwargs): ... # incomplete - def getSample(self, varIdx, sidx, buf) -> None: ... - def getSampleWeights(self, *args, **kwargs): ... # incomplete - def getSamples(self, *args, **kwargs): ... # incomplete - def getSubMatrix(self, *args, **kwargs): ... # incomplete - def getSubVector(self, *args, **kwargs): ... # incomplete - def getTestNormCatResponses(self, *args, **kwargs): ... # incomplete - def getTestResponses(self, *args, **kwargs): ... # incomplete - def getTestSampleIdx(self, *args, **kwargs): ... # incomplete - def getTestSampleWeights(self, *args, **kwargs): ... # incomplete - def getTestSamples(self, *args, **kwargs): ... # incomplete - def getTrainNormCatResponses(self, *args, **kwargs): ... # incomplete - def getTrainResponses(self, *args, **kwargs): ... # incomplete - def getTrainSampleIdx(self, *args, **kwargs): ... # incomplete - def getTrainSampleWeights(self, *args, **kwargs): ... # incomplete - def getTrainSamples(self, *args, **kwargs): ... # incomplete - def getValues(self, vi, sidx, values) -> None: ... - def getVarIdx(self, *args, **kwargs): ... # incomplete - def getVarSymbolFlags(self, *args, **kwargs): ... # incomplete - def getVarType(self, *args, **kwargs): ... # incomplete - def setTrainTestSplit(self, *args, **kwargs): ... # incomplete - def setTrainTestSplitRatio(self, *args, **kwargs): ... # incomplete - def shuffleTrainTest(self) -> None: ... - - -class ocl_Device: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def OpenCLVersion(self, *args, **kwargs): ... # incomplete - def OpenCL_C_Version(self, *args, **kwargs): ... 
# incomplete - def addressBits(self, *args, **kwargs): ... # incomplete - def available(self, *args, **kwargs): ... # incomplete - def compilerAvailable(self, *args, **kwargs): ... # incomplete - def deviceVersionMajor(self, *args, **kwargs): ... # incomplete - def deviceVersionMinor(self, *args, **kwargs): ... # incomplete - def doubleFPConfig(self, *args, **kwargs): ... # incomplete - def driverVersion(self, *args, **kwargs): ... # incomplete - def endianLittle(self, *args, **kwargs): ... # incomplete - def errorCorrectionSupport(self, *args, **kwargs): ... # incomplete - def executionCapabilities(self, *args, **kwargs): ... # incomplete - def extensions(self, *args, **kwargs): ... # incomplete - def getDefault(self, *args, **kwargs): ... # incomplete - def globalMemCacheLineSize(self, *args, **kwargs): ... # incomplete - def globalMemCacheSize(self, *args, **kwargs): ... # incomplete - def globalMemCacheType(self, *args, **kwargs): ... # incomplete - def globalMemSize(self, *args, **kwargs): ... # incomplete - def halfFPConfig(self, *args, **kwargs): ... # incomplete - def hostUnifiedMemory(self, *args, **kwargs): ... # incomplete - def image2DMaxHeight(self, *args, **kwargs): ... # incomplete - def image2DMaxWidth(self, *args, **kwargs): ... # incomplete - def image3DMaxDepth(self, *args, **kwargs): ... # incomplete - def image3DMaxHeight(self, *args, **kwargs): ... # incomplete - def image3DMaxWidth(self, *args, **kwargs): ... # incomplete - def imageFromBufferSupport(self, *args, **kwargs): ... # incomplete - def imageMaxArraySize(self, *args, **kwargs): ... # incomplete - def imageMaxBufferSize(self, *args, **kwargs): ... # incomplete - def imageSupport(self, *args, **kwargs): ... # incomplete - def intelSubgroupsSupport(self, *args, **kwargs): ... # incomplete - def isAMD(self, *args, **kwargs): ... # incomplete - def isExtensionSupported(self, *args, **kwargs): ... # incomplete - def isIntel(self, *args, **kwargs): ... # incomplete - def isNVidia(self, *args, **kwargs): ... # incomplete - def linkerAvailable(self, *args, **kwargs): ... # incomplete - def localMemSize(self, *args, **kwargs): ... # incomplete - def localMemType(self, *args, **kwargs): ... # incomplete - def maxClockFrequency(self, *args, **kwargs): ... # incomplete - def maxComputeUnits(self, *args, **kwargs): ... # incomplete - def maxConstantArgs(self, *args, **kwargs): ... # incomplete - def maxConstantBufferSize(self, *args, **kwargs): ... # incomplete - def maxMemAllocSize(self, *args, **kwargs): ... # incomplete - def maxParameterSize(self, *args, **kwargs): ... # incomplete - def maxReadImageArgs(self, *args, **kwargs): ... # incomplete - def maxSamplers(self, *args, **kwargs): ... # incomplete - def maxWorkGroupSize(self, *args, **kwargs): ... # incomplete - def maxWorkItemDims(self, *args, **kwargs): ... # incomplete - def maxWriteImageArgs(self, *args, **kwargs): ... # incomplete - def memBaseAddrAlign(self, *args, **kwargs): ... # incomplete - def name(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthChar(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthDouble(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthFloat(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthHalf(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthInt(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthLong(self, *args, **kwargs): ... # incomplete - def nativeVectorWidthShort(self, *args, **kwargs): ... 
# incomplete - def preferredVectorWidthChar(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthDouble(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthFloat(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthHalf(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthInt(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthLong(self, *args, **kwargs): ... # incomplete - def preferredVectorWidthShort(self, *args, **kwargs): ... # incomplete - def printfBufferSize(self, *args, **kwargs): ... # incomplete - def profilingTimerResolution(self, *args, **kwargs): ... # incomplete - def singleFPConfig(self, *args, **kwargs): ... # incomplete - def type(self, *args, **kwargs): ... # incomplete - def vendorID(self, *args, **kwargs): ... # incomplete - def vendorName(self, *args, **kwargs): ... # incomplete - def version(self, *args, **kwargs): ... # incomplete - - -class ocl_OpenCLExecutionContext: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - - -class segmentation_IntelligentScissorsMB: - def __init__(self, *args, **kwargs) -> None: ... # incomplete - def applyImage(self, *args, **kwargs): ... # incomplete - def applyImageFeatures(self, *args, **kwargs): ... # incomplete - def buildMap(self, sourcePt) -> None: ... - def getContour(self, *args, **kwargs): ... # incomplete - def setEdgeFeatureCannyParameters(self, *args, **kwargs): ... # incomplete - def setEdgeFeatureZeroCrossingParameters(self, *args, **kwargs): ... # incomplete - def setGradientMagnitudeMaxLimit(self, *args, **kwargs): ... # incomplete - def setWeights(self, *args, **kwargs): ... # incomplete - - -def AKAZE_create( - descriptor_type=..., - descriptor_size=..., - descriptor_channels=..., - threshold=..., - nOctaves=..., - nOctaveLayers=..., - diffusivity=..., -): ... -def AffineFeature_create(*args, **kwargs): ... # incomplete -def AgastFeatureDetector_create(threshold=..., nonmaxSuppression=..., type=...): ... -def BFMatcher_create(normType: int = ..., crossCheck=...): ... -@overload -def BRISK_create(thresh=..., octaves=..., patternScale=...): ... -@overload -def BRISK_create(radiusList, numberList, dMax=..., dMin=..., indexChange=...): ... -@overload -def BRISK_create(thresh, octaves, radiusList, numberList, dMax=..., dMin=..., indexChange=...): ... -def CamShift(probImage, window, criteria) -> tuple[_RotatedRectResult, _window]: ... -@overload -def Canny(image: Mat, threshold1, threshold2, edges=..., apertureSize=..., L2gradient=...) -> _edges: ... -@overload -def Canny(dx, dy, threshold1, threshold2, edges=..., L2gradient=...) -> _edges: ... -def CascadeClassifier_convert(oldcascade, newcascade): ... -def DISOpticalFlow_create(preset=...): ... -@overload -def DescriptorMatcher_create(descriptorMatcherType: str) -> DescriptorMatcher: ... -@overload -def DescriptorMatcher_create(matcherType: int) -> DescriptorMatcher: ... - - -def EMD( - signature1, - signature2, - distType, - cost=..., - lowerBound=..., - flow=..., -) -> tuple[ - tuple[ - Incomplete, - _lowerBound, - _flow, - ] -]: ... - - -def FaceDetectorYN_create(*args, **kwargs): ... # incomplete -def FaceRecognizerSF_create(*args, **kwargs): ... # incomplete - - -def FarnebackOpticalFlow_create( - numLevels=..., - pyrScale=..., - fastPyramids=..., - winSize=..., - numIters=..., - polyN=..., - polySigma=..., - flags: int | None = ..., -): ... - - -def FastFeatureDetector_create(threshold=..., nonmaxSuppression=..., type=...): ... -def FlannBasedMatcher_create(): ... 
- - -@overload -def GFTTDetector_create( - maxCorners=..., - qualityLevel=..., - minDistance=..., - blockSize=..., - useHarrisDetector=..., - k=..., -): ... - - -@overload -def GFTTDetector_create( - maxCorners, - qualityLevel, - minDistance, - blockSize, - gradiantSize, - useHarrisDetector=..., - k=..., -): ... - - -def GaussianBlur(src: Mat, ksize, sigmaX, dst: Mat = ..., sigmaY=..., borderType=...) -> _dst: ... -def HOGDescriptor_getDaimlerPeopleDetector(): ... -def HOGDescriptor_getDefaultPeopleDetector(): ... - - -def HoughCircles( - image: Mat, method: int, dp, minDist, circles=..., param1=..., param2=..., minRadius=..., maxRadius=..., -) -> _circles: ... - - -def HoughLines( - image: Mat, rho, theta, threshold, lines=..., srn=..., - stn=..., min_theta=..., max_theta=..., -) -> _lines: ... - - -def HoughLinesP(image: Mat, rho, theta, threshold, lines=..., minLineLength=..., maxLineGap=...) -> _lines: ... - - -def HoughLinesPointSet( - _point, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step, _lines=..., -) -> _lines: ... -def HoughLinesWithAccumulator(*args, **kwargs): ... # incomplete -def HuMoments(m, hu=...) -> _hu: ... -def KAZE_create(extended=..., upright=..., threshold=..., nOctaves=..., nOctaveLayers=..., diffusivity=...): ... -@overload -def KeyPoint_convert(keypoints, keypointIndexes=...) -> _points2f: ... -@overload -def KeyPoint_convert(points2f, size=..., response=..., octave=..., class_id=...) -> _keypoints: ... -def KeyPoint_overlap(kp1, kp2): ... -def LUT(src: Mat, lut, dst: Mat = ...) -> _dst: ... -def Laplacian(src: Mat, ddepth, dst: Mat = ..., ksize=..., scale=..., delta=..., borderType=...) -> _dst: ... - - -def MSER_create( - _delta=..., - _min_area=..., - _max_area=..., - _max_variation=..., - _min_diversity=..., - _max_evolution=..., - _area_threshold=..., - _min_margin=..., - _edge_blur_size=..., -): ... -def Mahalanobis(v1, v2, icovar): ... - - -def ORB_create( - nfeatures=..., - scaleFactor=..., - nlevels=..., - edgeThreshold=..., - firstLevel=..., - WTA_K=..., - scoreType=..., - patchSize=..., - fastThreshold=..., -): ... -def PCABackProject(data, mean, eigenvectors, result=...): ... -@overload -def PCACompute(data, mean, eigenvectors=..., maxComponents=...) -> tuple[tuple[_mean, _eigenvectors]]: ... -@overload -def PCACompute(data, mean, retainedVariance, eigenvectors=...) -> tuple[tuple[_mean, _eigenvectors]]: ... - - -@overload -def PCACompute2( - data, mean, eigenvectors=..., eigenvalues=..., maxComponents=..., -) -> tuple[tuple[_mean, _eigenvectors, _eigenvalues]]: ... - - -@overload -def PCACompute2( - data, mean, retainedVariance, eigenvectors=..., eigenvalues=..., -) -> tuple[tuple[_mean, _eigenvectors, _eigenvalues]]: ... -def PCAProject(data, mean, eigenvectors, result=...) -> _result: ... -def PSNR(src1: Mat, src2: Mat, R=...): ... -def QRCodeEncoder_create(*args, **kwargs): ... # incomplete - - -def RQDecomp3x3( - src: Mat, mtxR=..., mtxQ=..., Qx=..., Qy=..., Qz=..., -) -> tuple[tuple[Incomplete, _mtxR, _mtxQ, _Qx, _Qy, _Qz]]: ... -def Rodrigues(src: Mat, dst: Mat = ..., jacobian=...) -> tuple[tuple[_dst, _jacobian]]: ... -def SIFT_create(nfeatures=..., nOctaveLayers=..., contrastThreshold=..., edgeThreshold=..., sigma=...): ... -def SVBackSubst(w, u, vt, rhs, dst: Mat = ...) -> _dst: ... -def SVDecomp(src: Mat, w=..., u=..., vt=..., flags: int | None = ...) -> tuple[tuple[_w, _u, _vt]]: ... -def Scharr(src: Mat, ddepth, dx, dy, dst: Mat = ..., scale=..., delta=..., borderType=...) -> _dst: ... 
-def SimpleBlobDetector_create(parameters=...): ... -def Sobel(src: Mat, ddepth, dx, dy, dst: Mat = ..., ksize=..., scale=..., delta=..., borderType=...) -> _dst: ... - - -def SparsePyrLKOpticalFlow_create( - winSize=..., - maxLevel=..., - crit=..., - flags: int | None = ..., - minEigThreshold=..., -): ... - - -def StereoBM_create(numDisparities=..., blockSize=...): ... - - -def StereoSGBM_create( - minDisparity=..., - numDisparities=..., - blockSize=..., - P1=..., - P2=..., - disp12MaxDiff=..., - preFilterCap=..., - uniquenessRatio=..., - speckleWindowSize=..., - speckleRange=..., - mode=..., -): ... -def Stitcher_create(mode=...): ... -def TrackerDaSiamRPN_create(*args, **kwargs): ... # incomplete -def TrackerGOTURN_create(*args, **kwargs): ... # incomplete -def TrackerMIL_create(*args, **kwargs): ... # incomplete -def UMat_context(): ... -def UMat_queue(): ... -def VariationalRefinement_create(): ... -def VideoWriter_fourcc(c1, c2, c3, c4): ... -def _registerMatType(*args, **kwargs): ... # incomplete -def absdiff(src1: Mat, src2: Mat, dst: Mat = ...) -> _dst: ... -def accumulate(src: Mat, dst: Mat, mask: Mat = ...) -> _dst: ... -def accumulateProduct(src1: Mat, src2: Mat, dst: Mat, mask: Mat = ...) -> _dst: ... -def accumulateSquare(src: Mat, dst: Mat, mask: Mat = ...) -> _dst: ... -def accumulateWeighted(src: Mat, dst: Mat, alpha, mask: Mat = ...) -> _dst: ... -def adaptiveThreshold(src: Mat, maxValue, adaptiveMethod, thresholdType, blockSize, C, dst: Mat = ...) -> _dst: ... -def add(src1: Mat | _NumericScalar, src2: Mat | _NumericScalar, dst: Mat = ..., mask: Mat = ..., dtype=...) -> _dst: ... -def addText(img: Mat, text, org, nameFont, pointSize=..., color=..., weight=..., style=..., spacing=...) -> None: ... -def addWeighted(src1: Mat, alpha, src2: Mat, beta, gamma, dst: Mat = ..., dtype=...) -> _dst: ... -@overload -def applyColorMap(src: Mat, colormap, dst: Mat = ...) -> _dst: ... -@overload -def applyColorMap(src, userColor, dst=...) -> _dst: ... -def approxPolyDP(curve, epsilon, closed, approxCurve=...) -> _approxCurve: ... -def arcLength(curve, closed): ... -def arrowedLine(img: Mat, pt1, pt2, color, thickness=..., line_type=..., shift=..., tipLength=...) -> _img: ... - - -def batchDistance( - src1: Mat, - src2: Mat, - dtype, - dist=..., - nidx=..., - normType: int = ..., - K=..., - mask: Mat = ..., - update=..., - crosscheck=..., -) -> tuple[ - _dist, - _nidx, -]: ... - - -def bilateralFilter(src: Mat, d, sigmaColor, sigmaSpace, dst: Mat = ..., borderType=...) -> _dst: ... -def bitwise_and(src1: Mat, src2: Mat, dst: Mat = ..., mask: Mat = ...) -> _dst: ... -def bitwise_not(src: Mat, dst: Mat = ..., mask: Mat = ...) -> _dst: ... -def bitwise_or(src1: Mat, src2: Mat, dst: Mat = ..., mask: Mat = ...) -> _dst: ... -def bitwise_xor(src1: Mat, src2: Mat, dst: Mat = ..., mask: Mat = ...) -> _dst: ... -def blendLinear(*args, **kwargs): ... # incomplete -def blur(src: Mat, ksize, dst: Mat = ..., anchor=..., borderType=...) -> _dst: ... -def borderInterpolate(p, len, borderType): ... -def boundingRect(array): ... -def boxFilter(src: Mat, ddepth, ksize, dst: Mat = ..., anchor=..., normalize=..., borderType=...) -> _dst: ... -def boxPoints(box, points=...) -> _points: ... - - -def buildOpticalFlowPyramid( - img: Mat, - winSize, - maxLevel, - pyramid=..., - withDerivatives=..., - pyrBorder=..., - derivBorder=..., - tryReuseInputImage=..., -) -> tuple[ - Incomplete, - _pyramid, -]: ... 
- - -def calcBackProject( - images: Sequence[Mat], channels: Sequence[int], hist, ranges: Sequence[int], scale, dst: Mat = ..., -) -> _dst: ... -def calcCovarMatrix(samples, mean, flags: int | None, covar=..., ctype=...) -> tuple[_covar, _mean]: ... - - -def calcHist( - images: Sequence[Mat], - channels: Sequence[int], - mask: Mat | None, - histSize: Sequence[int], - ranges: Sequence[int], - hist: Mat = ..., - accumulate=..., -) -> Mat: ... - - -def calcOpticalFlowFarneback( - prev, next, flow, pyr_scale, levels, winsize, iterations, poly_n, poly_sigma, flags: int | None, -) -> _flow: ... - - -def calcOpticalFlowPyrLK( - prevImg, - nextImg, - prevPts, - nextPts, - status=..., - err=..., - winSize=..., - maxLevel=..., - criteria=..., - flags: int | None = ..., - minEigThreshold=..., -) -> tuple[_nextPts, _status, _err]: ... - - -def calibrateCamera( - objectPoints, - imagePoints, - imageSize, - cameraMatrix, - distCoeffs, - rvecs=..., - tvecs=..., - flags: int | None = ..., - criteria=..., -) -> tuple[ - Incomplete, - _cameraMatrix, - _distCoeffs, - _rvecs, - _tvecs, -]: ... - - -def calibrateCameraExtended( - objectPoints, - imagePoints, - imageSize, - cameraMatrix, - distCoeffs, - rvecs=..., - tvecs=..., - stdDeviationsIntrinsics=..., - stdDeviationsExtrinsics=..., - perViewErrors=..., - flags: int | None = ..., - criteria=..., -) -> tuple[ - Incomplete, - _cameraMatrix, - _distCoeffs, - _rvecs, - _tvecs, - _stdDeviationsIntrinsics, - _stdDeviationsExtrinsics, - _perViewErrors, -]: ... - - -def calibrateCameraRO( - objectPoints, - imagePoints, - imageSize, - iFixedPoint, - cameraMatrix, - distCoeffs, - rvecs=..., - tvecs=..., - newObjPoints=..., - flags: int | None = ..., - criteria=..., -) -> tuple[Incomplete, _cameraMatrix, _distCoeffs, _rvecs, _tvecs, _newObjPoints]: ... - - -def calibrateCameraROExtended( - objectPoints, - imagePoints, - imageSize, - iFixedPoint, - cameraMatrix, - distCoeffs, - rvecs=..., - tvecs=..., - newObjPoints=..., - stdDeviationsIntrinsics=..., - stdDeviationsExtrinsics=..., - stdDeviationsObjPoints=..., - perViewErrors=..., - flags: int | None = ..., - criteria=..., -) -> tuple[ - Incomplete, - _cameraMatrix, - _distCoeffs, - _rvecs, - _tvecs, - _newObjPoints, - _stdDeviationsIntrinsics, - _stdDeviationsExtrinsics, - _stdDeviationsObjPoints, - _perViewErrors, -]: ... - - -def calibrateHandEye( - R_gripper2base, t_gripper2base, R_target2cam, t_target2cam, R_cam2gripper=..., t_cam2gripper=..., method: int = ..., -) -> tuple[_R_cam2gripper, _t_cam2gripper]: ... -def calibrateRobotWorldHandEye(*args, **kwargs): ... # incomplete - - -def calibrationMatrixValues( - cameraMatrix, imageSize, apertureWidth, apertureHeight, -) -> tuple[_fovx, _fovy, _focalLength, _principalPoint, _aspectRatio]: ... -def cartToPolar(x, y, magnitude=..., angle=..., angleInDegrees=...) -> tuple[_magnitude, _angle]: ... -def checkChessboard(img: Mat, size): ... -def checkHardwareSupport(feature): ... -def checkRange(a, quiet=..., minVal=..., maxVal=...) -> tuple[Incomplete, _pos]: ... -def circle(img: Mat, center, radius, color, thickness=..., lineType=..., shift=...) -> _img: ... -def clipLine(imgRect, pt1, pt2) -> tuple[Incomplete, _pt1, _pt2]: ... -def colorChange(src: Mat, mask: Mat, dst: Mat = ..., red_mul=..., green_mul=..., blue_mul=...) -> _dst: ... -def compare(src1: Mat, src2: Mat, cmpop, dst: Mat = ...) -> _dst: ... -def compareHist(H1: Mat, H2: Mat, method: int) -> float: ... -def completeSymm(m, lowerToUpper=...) -> _m: ... 
- - -def composeRT( - rvec1, - tvec1, - rvec2, - tvec2, - rvec3=..., - tvec3=..., - dr3dr1=..., - dr3dt1=..., - dr3dr2=..., - dr3dt2=..., - dt3dr1=..., - dt3dt1=..., - dt3dr2=..., - dt3dt2=..., -) -> tuple[_rvec3, _tvec3, _dr3dr1, _dr3dt1, _dr3dr2, _dr3dt2, _dt3dr1, _dt3dt1, _dt3dr2, _dt3dt2]: ... -def computeCorrespondEpilines(points, whichImage, F, lines=...) -> _lines: ... -def computeECC(templateImage, inputImage, inputMask=...): ... -def connectedComponents(image: Mat, labels=..., connectivity=..., ltype=...) -> tuple[Incomplete, _labels]: ... - - -def connectedComponentsWithAlgorithm( - image: Mat, - connectivity, - ltype, - ccltype, - labels=..., -) -> tuple[ - Incomplete, - _labels, -]: ... - - -def connectedComponentsWithStats( - image: Mat, labels=..., stats=..., centroids=..., connectivity=..., ltype=..., -) -> tuple[Incomplete, _labels, _stats, _centroids]: ... - - -def connectedComponentsWithStatsWithAlgorithm( - image: Mat, connectivity, ltype, ccltype, labels=..., stats=..., centroids=..., -) -> tuple[Incomplete, _labels, _stats, _centroids]: ... -@overload -def contourArea(approx): ... -@overload -def contourArea(contour, oriented=...): ... -def convertFp16(src: Mat, dst: Mat = ...) -> _dst: ... - - -def convertMaps( - map1, - map2, - dstmap1type, - dstmap1=..., - dstmap2=..., - nninterpolation=..., -) -> tuple[ - _dstmap1, - _dstmap2, -]: ... - - -def convertPointsFromHomogeneous(src: Mat, dst: Mat = ...) -> _dst: ... -def convertPointsToHomogeneous(src: Mat, dst: Mat = ...) -> _dst: ... -def convertScaleAbs(src: Mat, dst: Mat = ..., alpha=..., beta=...) -> _dst: ... -def convexHull(points, hull=..., clockwise=..., returnPoints=...) -> _hull: ... -def convexityDefects(contour, convexhull, convexityDefects=...) -> _convexityDefects: ... -def copyMakeBorder(src: Mat, top, bottom, left, right, borderType, dst: Mat = ..., value=...) -> _dst: ... -def copyTo(src: Mat, mask: Mat, dst: Mat = ...) -> _dst: ... -def cornerEigenValsAndVecs(src: Mat, blockSize, ksize, dst: Mat = ..., borderType=...) -> _dst: ... -def cornerHarris(src: Mat, blockSize, ksize, k, dst: Mat = ..., borderType=...) -> _dst: ... -def cornerMinEigenVal(src: Mat, blockSize, dst: Mat = ..., ksize=..., borderType=...) -> _dst: ... -def cornerSubPix(image: Mat, corners, winSize, zeroZone, criteria) -> _corners: ... -def correctMatches(F, points1, points2, newPoints1=..., newPoints2=...) -> tuple[_newPoints1, _newPoints2]: ... -def countNonZero(src: Mat | _NumericScalar) -> int: ... -def createAlignMTB(max_bits=..., exclude_range=..., cut=...): ... -def createBackgroundSubtractorKNN(history=..., dist2Threshold=..., detectShadows=...): ... -def createBackgroundSubtractorMOG2(history=..., varThreshold=..., detectShadows=...): ... -def createButton(buttonName, onChange, userData=..., buttonType=..., initialButtonState=...) -> None: ... -def createCLAHE(clipLimit=..., tileGridSize=...): ... -def createCalibrateDebevec(samples=..., lambda_=..., random=...): ... -def createCalibrateRobertson(max_iter=..., threshold=...): ... -def createGeneralizedHoughBallard(): ... -def createGeneralizedHoughGuil(): ... -def createHanningWindow(winSize, type, dst: Mat = ...) -> _dst: ... - - -def createLineSegmentDetector( - _refine=..., _scale=..., _sigma_scale=..., _quant=..., _ang_th=..., _log_eps=..., _density_th=..., _n_bins=..., -): ... -def createMergeDebevec(): ... -def createMergeMertens(contrast_weight=..., saturation_weight=..., exposure_weight=...): ... -def createMergeRobertson(): ... -def createTonemap(gamma=...): ... 
-def createTonemapDrago(gamma=..., saturation=..., bias=...): ... -def createTonemapMantiuk(gamma=..., scale=..., saturation=...): ... -def createTonemapReinhard(gamma=..., intensity=..., light_adapt=..., color_adapt=...): ... -def createTrackbar(trackbarName, windowName, value, count, onChange) -> None: ... -def cubeRoot(val): ... -def cvtColor(src: Mat, code: int, dst: Mat = ..., dstCn: int = ...) -> Mat: ... -def cvtColorTwoPlane(src1: Mat, src2: Mat, code: int, dst: Mat = ...) -> _dst: ... -def dct(src: Mat, dst: Mat = ..., flags: int | None = ...) -> _dst: ... -def decolor(src: Mat, grayscale=..., color_boost=...) -> tuple[_grayscale, _color_boost]: ... -def decomposeEssentialMat(E, R1=..., R2=..., t=...) -> tuple[_R1, _R2, _t]: ... - - -def decomposeHomographyMat( - H, K, rotations=..., translations=..., normals=..., -) -> tuple[Incomplete, _rotations, _translations, _normals]: ... - - -def decomposeProjectionMatrix( - projMatrix, - cameraMatrix=..., - rotMatrix=..., - transVect=..., - rotMatrixX=..., - rotMatrixY=..., - rotMatrixZ=..., - eulerAngles=..., -) -> tuple[ - _cameraMatrix, - _rotMatrix, - _transVect, - _rotMatrixX, - _rotMatrixY, - _rotMatrixZ, - _eulerAngles, -]: ... - - -def demosaicing(src: Mat, code: int, dst: Mat = ..., dstCn: int = ...) -> _dst: ... -def denoise_TVL1(observations, result, lambda_=..., niters=...) -> None: ... -def destroyAllWindows() -> None: ... -def destroyWindow(winname) -> None: ... -def detailEnhance(src: Mat, dst: Mat = ..., sigma_s=..., sigma_r=...) -> _dst: ... -def determinant(mtx): ... -def dft(src: Mat, dst: Mat = ..., flags: int | None = ..., nonzeroRows=...) -> _dst: ... -def dilate(src: Mat, kernel, dst: Mat = ..., anchor=..., iterations=..., borderType=..., borderValue=...) -> _dst: ... -def displayOverlay(winname, text, delayms=...) -> None: ... -def displayStatusBar(winname, text, delayms=...) -> None: ... -def distanceTransform(src: Mat, distanceType, maskSize, dst: Mat = ..., dstType=...) -> _dst: ... - - -def distanceTransformWithLabels( - src: Mat, distanceType, maskSize, dst: Mat = ..., labels=..., labelType=..., -) -> tuple[_dst, _labels]: ... -def divSpectrums(*args, **kwargs): ... # incomplete -@overload -def divide(src1: Mat, src2: Mat, dst: Mat = ..., scale=..., dtype=...) -> _dst: ... -@overload -def divide(scale, src2, dst=..., dtype=...) -> _dst: ... -def dnn_registerLayer() -> None: ... -def dnn_unregisterLayer() -> None: ... -def drawChessboardCorners(image: Mat, patternSize, corners, patternWasFound) -> _image: ... - - -def drawContours( - image: Mat, contours, contourIdx, color, thickness=..., lineType=..., hierarchy=..., maxLevel=..., offset=..., -) -> _image: ... -def drawFrameAxes(image: Mat, cameraMatrix, distCoeffs, rvec, tvec, length, thickness=...) -> _image: ... -def drawKeypoints(image: Mat, keypoints, outImage, color=..., flags: int | None = ...) -> _outImage: ... -def drawMarker(img: Mat, position, color, markerType=..., markerSize=..., thickness=..., line_type=...) -> _img: ... - - -def drawMatches( - img1, - keypoints1, - img2, - keypoints2, - matches1to2, - outImg, - matchColor=..., - singlePointColor=..., - matchesMask=..., - flags: int | None = ..., -) -> _outImg: ... - - -def drawMatchesKnn( - img1, - keypoints1, - img2, - keypoints2, - matches1to2, - outImg, - matchColor=..., - singlePointColor=..., - matchesMask=..., - flags: int | None = ..., -) -> _outImg: ... -def edgePreservingFilter(src: Mat, dst: Mat = ..., flags: int | None = ..., sigma_s=..., sigma_r=...) -> _dst: ... 
-def eigen(src: Mat, eigenvalues=..., eigenvectors=...) -> tuple[Incomplete, _eigenvalues, _eigenvectors]: ... -def eigenNonSymmetric(src: Mat, eigenvalues=..., eigenvectors=...) -> tuple[_eigenvalues, _eigenvectors]: ... - - -@overload -def ellipse( - img: Mat, - center, - axes, - angle, - startAngle, - endAngle, - color, - thickness=..., - lineType=..., - shift=..., -) -> _img: ... - - -@overload -def ellipse(img, box, color, thickness=..., lineType=...) -> _img: ... -def ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta) -> _pts: ... -def empty_array_desc(*args, **kwargs): ... # incomplete -def empty_gopaque_desc(*args, **kwargs): ... # incomplete -def empty_scalar_desc(*args, **kwargs): ... # incomplete -def equalizeHist(src: Mat, dst: Mat = ...) -> _dst: ... -def erode(src: Mat, kernel, dst: Mat = ..., anchor=..., iterations=..., borderType=..., borderValue=...) -> _dst: ... - - -def estimateAffine2D( - from_, to, inliers=..., method: int = ..., ransacReprojThreshold=..., maxIters=..., confidence=..., refineIters=..., -) -> tuple[Incomplete, _inliers]: ... - - -def estimateAffine3D( - src: Mat, dst: Mat, out=..., inliers=..., ransacThreshold=..., confidence=..., -) -> tuple[Incomplete, _out, _inliers]: ... - - -def estimateAffinePartial2D( - from_, to, inliers=..., method: int = ..., ransacReprojThreshold=..., maxIters=..., confidence=..., refineIters=..., -) -> tuple[Incomplete, _inliers]: ... - - -def estimateChessboardSharpness( - image: Mat, patternSize, corners, rise_distance=..., vertical=..., sharpness=..., -) -> tuple[Incomplete, _sharpness]: ... - - -def estimateTranslation3D( - src: Mat, dst: Mat, out=..., inliers=..., ransacThreshold=..., confidence=..., -) -> tuple[Incomplete, _out, _inliers]: ... -def exp(src: Mat, dst: Mat = ...) -> _dst: ... -def extractChannel(src: Mat, coi, dst: Mat = ...) -> _dst: ... -def fastAtan2(y, x): ... -@overload -def fastNlMeansDenoising(src: Mat, dst: Mat = ..., h=..., templateWindowSize=..., searchWindowSize=...) -> _dst: ... -@overload -def fastNlMeansDenoising(src, h, dst=..., templateWindowSize=..., searchWindowSize=..., normType=...) -> _dst: ... - - -def fastNlMeansDenoisingColored( - src: Mat, dst: Mat = ..., h=..., hColor=..., templateWindowSize=..., searchWindowSize=..., -) -> _dst: ... - - -def fastNlMeansDenoisingColoredMulti( - srcImgs, - imgToDenoiseIndex, - temporalWindowSize, - dst: Mat = ..., - h=..., - hColor=..., - templateWindowSize=..., - searchWindowSize=..., -) -> _dst: ... - - -@overload -def fastNlMeansDenoisingMulti( - srcImgs, imgToDenoiseIndex, temporalWindowSize, dst: Mat = ..., h=..., templateWindowSize=..., searchWindowSize=..., -) -> _dst: ... - - -@overload -def fastNlMeansDenoisingMulti( - srcImgs, - imgToDenoiseIndex, - temporalWindowSize, - h, - dst=..., - templateWindowSize=..., - searchWindowSize=..., - normType=..., -) -> _dst: ... - - -def fillConvexPoly(img: Mat, points, color, lineType=..., shift=...) -> _img: ... -def fillPoly(img: Mat, pts, color, lineType=..., shift=..., offset=...) -> _img: ... -def filter2D(src: Mat, ddepth, kernel, dst: Mat = ..., anchor=..., delta=..., borderType=...) -> _dst: ... - - -def filterHomographyDecompByVisibleRefpoints( - rotations, normals, beforePoints, afterPoints, possibleSolutions=..., pointsMask=..., -) -> _possibleSolutions: ... -def filterSpeckles(img: Mat, newVal, maxSpeckleSize, maxDiff, buf=...) -> tuple[_img, _buf]: ... -def find4QuadCornerSubpix(img: Mat, corners, region_size) -> tuple[Incomplete, _corners]: ... 
- - -def findChessboardCorners( - image: Mat, - patternSize, - corners=..., - flags: int | None = ..., -) -> tuple[ - Incomplete, - _corners, -]: ... - - -def findChessboardCornersSB( - image: Mat, - patternSize, - corners=..., - flags: int | None = ..., -) -> tuple[ - Incomplete, - _corners, -]: ... - - -def findChessboardCornersSBWithMeta( - image: Mat, patternSize, flags: int | None, corners=..., meta=..., -) -> tuple[Incomplete, _corners, _meta]: ... - - -@overload -def findCirclesGrid( - image: Mat, patternSize, flags: int | None, blobDetector, parameters, centers=..., -) -> tuple[Incomplete, _centers]: ... -@overload -def findCirclesGrid(image, patternSize, centers=..., flags=..., blobDetector=...) -> tuple[Incomplete, _centers]: ... - - -def findContours( - image: Mat, - mode, - method: int, - contours=..., - hierarchy=..., - offset=..., -) -> tuple[ - _contours, - _hierarchy, -]: ... - - -@overload -def findEssentialMat( - points1, points2, cameraMatrix, method: int = ..., prob=..., threshold=..., mask: Mat = ..., -) -> tuple[Incomplete, _mask]: ... - - -@overload -def findEssentialMat( - points1, points2, focal=..., pp=..., method=..., prob=..., threshold=..., mask=..., -) -> tuple[Incomplete, _mask]: ... - - -@overload -def findFundamentalMat( - points1, points2, method: int, ransacReprojThreshold, confidence, maxIters, mask: Mat = ..., -) -> tuple[Incomplete, _mask]: ... - - -@overload -def findFundamentalMat( - points1, points2, method=..., ransacReprojThreshold=..., confidence=..., mask=..., -) -> tuple[Incomplete, _mask]: ... - - -def findHomography( - srcPoints, dstPoints, method: int = ..., ransacReprojThreshold=..., mask: Mat = ..., maxIters=..., confidence=..., -) -> tuple[Incomplete, _mask]: ... -def findNonZero(src: Mat, idx=...) -> _idx: ... - - -def findTransformECC( - templateImage, inputImage, warpMatrix, motionType, criteria, inputMask, gaussFiltSize, -) -> tuple[Incomplete, _warpMatrix]: ... -def fitEllipse(points): ... -def fitEllipseAMS(points): ... -def fitEllipseDirect(points): ... -def fitLine(points, distType, param, reps, aeps, line=...) -> _line: ... -def flip(src: Mat, flipCode, dst: Mat = ...) -> _dst: ... - - -def floodFill( - image: Mat, mask: Mat | None, seedPoint, newVal, loDiff=..., upDiff=..., flags: int | None = ..., -) -> tuple[Incomplete, _image, _mask, _rect]: ... -def gemm(src1: Mat, src2: Mat, alpha, src3, beta, dst: Mat = ..., flags: int | None = ...) -> _dst: ... -def getAffineTransform(src: Mat, dst: Mat): ... -def getBuildInformation(): ... -def getCPUFeaturesLine(): ... -def getCPUTickCount(): ... -def getDefaultNewCameraMatrix(cameraMatrix, imgsize=..., centerPrincipalPoint=...): ... -def getDerivKernels(dx, dy, ksize, kx=..., ky=..., normalize=..., ktype=...) -> tuple[_kx, _ky]: ... -def getFontScaleFromHeight(fontFace, pixelHeight, thickness=...): ... -def getGaborKernel(ksize, sigma, theta, lambd, gamma, psi=..., ktype=...): ... -def getGaussianKernel(ksize, sigma, ktype=...): ... -def getHardwareFeatureName(feature): ... -def getLogLevel(*args, **kwargs): ... # incomplete -def getNumThreads(): ... -def getNumberOfCPUs(): ... -def getOptimalDFTSize(vecsize): ... - - -def getOptimalNewCameraMatrix( - cameraMatrix, distCoeffs, imageSize, alpha, newImgSize=..., centerPrincipalPoint=..., -) -> tuple[Incomplete, _validPixROI]: ... -def getPerspectiveTransform(src: Mat, dst: Mat, solveMethod=...): ... -def getRectSubPix(image: Mat, patchSize, center, patch=..., patchType=...) -> _patch: ... 
-def getRotationMatrix2D(center, angle, scale): ... -def getStructuringElement(shape, ksize, anchor=...): ... -def getTextSize(text, fontFace, fontScale, thickness) -> tuple[Incomplete, _baseLine]: ... -def getThreadNum(): ... -def getTickCount(): ... -def getTickFrequency(): ... -def getTrackbarPos(trackbarname, winname): ... -def getValidDisparityROI(roi1, roi2, minDisparity, numberOfDisparities, blockSize): ... -def getVersionMajor(): ... -def getVersionMinor(): ... -def getVersionRevision(): ... -def getVersionString(): ... -def getWindowImageRect(winname): ... -def getWindowProperty(winname, prop_id): ... - - -@overload -def goodFeaturesToTrack( - image: Mat, - maxCorners, - qualityLevel, - minDistance, - corners=..., - mask: Mat = ..., - blockSize=..., - useHarrisDetector=..., - k=..., -) -> _corners: ... - - -@overload -def goodFeaturesToTrack( - image, - maxCorners, - qualityLevel, - minDistance, - mask, - blockSize, - gradientSize, - corners=..., - useHarrisDetector=..., - k=..., -) -> _corners: ... - - -def goodFeaturesToTrackWithQuality(*args, **kwargs): ... # incomplete - - -def grabCut( - img: Mat, - mask: Mat | None, - rect, - bgdModel, - fgdModel, - iterCount, - mode=..., -) -> tuple[ - _mask, - _bgdModel, - _fgdModel, -]: ... - - -def groupRectangles(rectList, groupThreshold, eps=...) -> tuple[_rectList, _weights]: ... -def haveImageReader(filename: str): ... -def haveImageWriter(filename: str): ... -def haveOpenVX(): ... -def hconcat(src: Mat | Sequence[Mat], dst: Mat = ...) -> _dst: ... -def idct(src: Mat, dst: Mat = ..., flags: int | None = ...) -> _dst: ... -def idft(src: Mat, dst: Mat = ..., flags: int | None = ..., nonzeroRows=...) -> _dst: ... -def illuminationChange(src: Mat, mask: Mat, dst: Mat = ..., alpha=..., beta=...) -> _dst: ... -def imcount(*args, **kwargs): ... # incomplete -def imdecode(buf, flags: int | None): ... -def imencode(ext, img: Mat, params=...) -> tuple[Incomplete, _buf]: ... -def imread(filename: str, flags: int | None = ...) -> Mat: ... -def imreadmulti(filename: str, mats=..., flags: int | None = ...) -> tuple[Incomplete, _mats]: ... -def imshow(winname, mat) -> None: ... -def imwrite(filename: str, img: Mat, params: Sequence[int] = ...) -> bool: ... -def imwritemulti(*args, **kwargs): ... # incomplete -def inRange(src: Mat, lowerBound: Mat, upperbBound: Mat, dst: Mat = ...) -> Mat: ... -def initCameraMatrix2D(objectPoints, imagePoints, imageSize, aspectRatio=...): ... -def initInverseRectificationMap(*args, **kwargs): ... # incomplete - - -def initUndistortRectifyMap( - cameraMatrix, distCoeffs, R, newCameraMatrix, size, m1type, map1=..., map2=..., -) -> tuple[_map1, _map2]: ... -def inpaint(src: Mat, inpaintMask, inpaintRadius, flags: int | None, dst: Mat = ...) -> _dst: ... -def insertChannel(src: Mat, dst: Mat, coi) -> _dst: ... -def integral(src: Mat, sum=..., sdepth=...) -> _sum: ... -def integral2(src: Mat, sum=..., sqsum=..., sdepth=..., sqdepth=...) -> tuple[_sum, _sqsum]: ... -def integral3(src: Mat, sum=..., sqsum=..., tilted=..., sdepth=..., sqdepth=...) -> tuple[_sum, _sqsum, _tilted]: ... -def intersectConvexConvex(_p1, _p2, _p12=..., handleNested=...) -> tuple[Incomplete, _p12]: ... -def invert(src: Mat, dst: Mat = ..., flags: int | None = ...) -> tuple[Incomplete, _dst]: ... -def invertAffineTransform(M, iM=...) -> _iM: ... -def isContourConvex(contour): ... - - -def kmeans( - data, K, bestLabels, criteria, attempts, flags: int | None, centers=..., -) -> tuple[Incomplete, _bestLabels, _centers]: ... 
-def line(img: Mat, pt1, pt2, color, thickness=..., lineType=..., shift=...) -> _img: ... -def linearPolar(src: Mat, center, maxRadius, flags: int | None, dst: Mat = ...) -> _dst: ... -def log(src: Mat, dst: Mat = ...) -> _dst: ... -def logPolar(src: Mat, center, M, flags: int | None, dst: Mat = ...) -> _dst: ... -def magnitude(x, y, magnitude=...) -> _magnitude: ... -def matMulDeriv(A, B, dABdA=..., dABdB=...) -> tuple[_dABdA, _dABdB]: ... -def matchShapes(contour1, contour2, method: int, parameter): ... -def matchTemplate(image: Mat, templ: Mat, method: int, result: Mat = ..., mask: Mat | None = ...) -> Mat: ... -def max(src1: Mat, src2: Mat, dst: Mat = ...) -> _dst: ... -def mean(src: Mat, mask: Mat = ...): ... -def meanShift(probImage, window, criteria) -> tuple[Incomplete, _window]: ... -def meanStdDev(src: Mat, mean=..., stddev=..., mask: Mat = ...) -> tuple[_mean, _stddev]: ... -def medianBlur(src: Mat, ksize, dst: Mat = ...) -> _dst: ... -def merge(mv, dst: Mat = ...) -> _dst: ... -def min(src1: Mat, src2: Mat, dst: Mat = ...) -> _dst: ... -def minAreaRect(points): ... -def minEnclosingCircle(points) -> tuple[_center, _radius]: ... -def minEnclosingTriangle(points, triangle=...) -> tuple[Incomplete, _triangle]: ... -def minMaxLoc(src: Mat, mask: Mat = ...) -> tuple[float, float, tuple[int, int], tuple[int, int]]: ... -def mixChannels(src: Mat, dst: Mat, fromTo) -> _dst: ... -def moments(array, binaryImage=...): ... - - -def morphologyEx( - src: Mat, - op, - kernel, - dst: Mat = ..., - anchor=..., - iterations=..., - borderType=..., - borderValue=..., -) -> _dst: ... - - -def moveWindow(winname, x, y) -> None: ... -def mulSpectrums(a, b, flags: int | None, c=..., conjB=...) -> _c: ... -def mulTransposed(src: Mat, aTa, dst: Mat = ..., delta=..., scale=..., dtype=...) -> _dst: ... -def multiply(src1: Mat, src2: Mat, dst: Mat = ..., scale=..., dtype=...) -> _dst: ... -def namedWindow(winname, flags: int | None = ...) -> None: ... -@overload -def norm(src1: Mat, src2: Mat, normType: int = ..., mask: Mat | None = ...) -> float: ... -@overload -def norm(src1: Mat, src2: Mat, mask: Mat | None = ...) -> float: ... -def normalize(src: Mat, dst: Mat, alpha=..., beta=..., norm_type: int = ..., dtype=..., mask: Mat = ...) -> Mat: ... -def patchNaNs(a, val=...) -> _a: ... - - -def pencilSketch( - src: Mat, dst1: Mat = ..., dst2: Mat = ..., sigma_s=..., sigma_r=..., shade_factor=..., -) -> tuple[_dst1, _dst2]: ... -def perspectiveTransform(src: Mat, m, dst: Mat = ...) -> _dst: ... -def phase(x, y, angle=..., angleInDegrees=...) -> _angle: ... -def phaseCorrelate(src1: Mat, src2: Mat, window=...) -> tuple[Incomplete, _response]: ... -def pointPolygonTest(contour, pt, measureDist): ... -def polarToCart(magnitude, angle, x=..., y=..., angleInDegrees=...) -> tuple[_x, _y]: ... -def pollKey(*args, **kwargs): ... # incomplete -def polylines(img: Mat, pts, isClosed, color, thickness=..., lineType=..., shift=...) -> _img: ... -def pow(src: Mat, power, dst: Mat = ...) -> _dst: ... -def preCornerDetect(src: Mat, ksize, dst: Mat = ..., borderType=...) -> _dst: ... - - -def projectPoints( - objectPoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints=..., jacobian=..., aspectRatio=..., -) -> tuple[_imagePoints, _jacobian]: ... - - -def putText( - img: Mat, - text, - org, - fontFace, - fontScale, - color, - thickness=..., - lineType=..., - bottomLeftOrigin=..., -) -> _img: ... - - -def pyrDown(src: Mat, dst: Mat = ..., dstsize=..., borderType=...) -> _dst: ... 
-def pyrMeanShiftFiltering(src: Mat, sp, sr, dst: Mat = ..., maxLevel=..., termcrit=...) -> _dst: ... -def pyrUp(src: Mat, dst: Mat = ..., dstsize=..., borderType=...) -> _dst: ... -def randShuffle(dst: Mat, iterFactor=...) -> _dst: ... -def randn(dst: Mat, mean, stddev) -> _dst: ... -def randu(dst: Mat, low, high) -> _dst: ... -def readOpticalFlow(path): ... -@overload -def recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask): ... - - -@overload -def recoverPose( - E, - points1, - points2, - cameraMatrix, - R=..., - t=..., - mask: Mat = ..., -) -> tuple[ - Incomplete, - _R, - _t, - _mask, -]: ... - - -@overload -def recoverPose(E, points1, points2, R=..., t=..., focal=..., pp=..., mask=...) -> tuple[Incomplete, _R, _t, _mask]: ... - - -@overload -def recoverPose( - E, points1, points2, cameraMatrix, distanceThresh, R=..., t=..., mask=..., triangulatedPoints=..., -) -> tuple[Incomplete, _R, _t, _mask, _triangulatedPoints]: ... -@overload -def rectangle(img: Mat, pt1: _Point, pt2: _Point, color, thickness=..., lineType=..., shift=...) -> Mat: ... -@overload -def rectangle(img: Mat, rec: _Rect, color, thickness=..., lineType=..., shift=...) -> Mat: ... - - -def rectify3Collinear( - cameraMatrix1, - distCoeffs1, - cameraMatrix2, - distCoeffs2, - cameraMatrix3, - distCoeffs3, - imgpt1, - imgpt3, - imageSize, - R12, - T12, - R13, - T13, - alpha, - newImgSize, - flags: int | None, - R1=..., - R2=..., - R3=..., - P1=..., - P2=..., - P3=..., - Q=..., -) -> tuple[Incomplete, _R1, _R2, _R3, _P1, _P2, _P3, _Q, _roi1, _roi2]: ... -def redirectError(onError) -> None: ... -def reduce(src: Mat, dim, rtype, dst: Mat = ..., dtype=...) -> _dst: ... -def reduceArgMax(*args, **kwargs): ... # incomplete -def reduceArgMin(*args, **kwargs): ... # incomplete -def remap(src: Mat, map1, map2, interpolation: int, dst: Mat = ..., borderMode=..., borderValue=...) -> _dst: ... -def repeat(src: Mat, ny, nx, dst: Mat = ...) -> _dst: ... -def reprojectImageTo3D(disparity, Q, _3dImage=..., handleMissingValues=..., ddepth=...) -> _3dImage: ... - - -def resize( - src: Mat | int | bool, - dsize: _Size | None, - dst: Mat | _NumericScalar = ..., - fx: float = ..., - fy: float = ..., - interpolation: int = ..., -) -> Mat: ... -@overload -def resizeWindow(winname, width, height) -> None: ... -@overload -def resizeWindow(winname, size) -> None: ... -def rotate(src: Mat, rotateCode, dst: Mat = ...) -> _dst: ... -def rotatedRectangleIntersection(rect1, rect2, intersectingRegion=...) -> tuple[Incomplete, _intersectingRegion]: ... -def sampsonDistance(pt1, pt2, F): ... -def scaleAdd(src1: Mat, alpha, src2: Mat, dst: Mat = ...) -> _dst: ... -def seamlessClone(src: Mat, dst: Mat, mask: Mat | None, p, flags: int | None, blend=...) -> _blend: ... -@overload -def selectROI(windowName, img: Mat, showCrosshair=..., fromCenter=...): ... -@overload -def selectROI(img: Mat, showCrosshair=..., fromCenter=...): ... -def selectROIs(windowName, img: Mat, showCrosshair=..., fromCenter=...) -> _boundingBoxes: ... -def sepFilter2D(src: Mat, ddepth, kernelX, kernelY, dst: Mat = ..., anchor=..., delta=..., borderType=...) -> _dst: ... -def setIdentity(mtx, s=...) -> _mtx: ... -def setLogLevel(*args, **kwargs): ... # incomplete -def setMouseCallback(windowName, onMouse, param=...) -> None: ... -def setNumThreads(nthreads) -> None: ... -def setRNGSeed(seed) -> None: ... -def setTrackbarMax(trackbarname, winname, maxval) -> None: ... -def setTrackbarMin(trackbarname, winname, minval) -> None: ... 
-def setTrackbarPos(trackbarname, winname, pos) -> None: ... -def setUseOpenVX(flag) -> None: ... -def setUseOptimized(onoff) -> None: ... -def setWindowProperty(winname, prop_id, prop_value) -> None: ... -def setWindowTitle(winname, title) -> None: ... -def solve(src1: Mat, src2: Mat, dst: Mat = ..., flags: int | None = ...) -> tuple[Incomplete, _dst]: ... -def solveCubic(coeffs, roots=...) -> tuple[Incomplete, _roots]: ... -def solveLP(Func, Constr, z=...) -> tuple[Incomplete, _z]: ... - - -def solveP3P( - objectPoints, imagePoints, cameraMatrix, distCoeffs, flags: int | None, rvecs=..., tvecs=..., -) -> tuple[Incomplete, _rvecs, _tvecs]: ... - - -def solvePnP( - objectPoints, - imagePoints, - cameraMatrix, - distCoeffs, - rvec=..., - tvec=..., - useExtrinsicGuess=..., - flags: int | None = ..., -) -> tuple[ - Incomplete, - _rvec, - _tvec, -]: ... - - -def solvePnPGeneric( - objectPoints, - imagePoints, - cameraMatrix, - distCoeffs, - rvecs=..., - tvecs=..., - useExtrinsicGuess=..., - flags: int | None = ..., - rvec=..., - tvec=..., - reprojectionError=..., -) -> tuple[Incomplete, _rvecs, _tvecs, _reprojectionError]: ... - - -def solvePnPRansac( - objectPoints, - imagePoints, - cameraMatrix, - distCoeffs, - rvec=..., - tvec=..., - useExtrinsicGuess=..., - iterationsCount=..., - reprojectionError=..., - confidence=..., - inliers=..., - flags: int | None = ..., -) -> tuple[Incomplete, _rvec, _tvec, _inliers]: ... - - -def solvePnPRefineLM( - objectPoints, - imagePoints, - cameraMatrix, - distCoeffs, - rvec, - tvec, - criteria=..., -) -> tuple[ - _rvec, - _tvec, -]: ... - - -def solvePnPRefineVVS( - objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, criteria=..., VVSlambda=..., -) -> tuple[_rvec, _tvec]: ... -def solvePoly(coeffs, roots=..., maxIters=...) -> tuple[Incomplete, _roots]: ... -def sort(src: Mat, flags: int | None, dst: Mat = ...) -> _dst: ... -def sortIdx(src: Mat, flags: int | None, dst: Mat = ...) -> _dst: ... -def spatialGradient(src: Mat, dx=..., dy=..., ksize=..., borderType=...) -> tuple[_dx, _dy]: ... -def split(m, mv=...) -> _mv: ... -def sqrBoxFilter(src: Mat, ddepth, ksize, dst: Mat = ..., anchor=..., normalize=..., borderType=...) -> _dst: ... -def sqrt(src: Mat, dst: Mat = ...) -> _dst: ... -def startWindowThread(): ... - - -def stereoCalibrate( - objectPoints, - imagePoints1, - imagePoints2, - cameraMatrix1, - distCoeffs1, - cameraMatrix2, - distCoeffs2, - imageSize, - R=..., - T=..., - E=..., - F=..., - flags: int | None = ..., - criteria=..., -) -> tuple[Incomplete, _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2, _R, _T, _E, _F]: ... - - -def stereoCalibrateExtended( - objectPoints, - imagePoints1, - imagePoints2, - cameraMatrix1, - distCoeffs1, - cameraMatrix2, - distCoeffs2, - imageSize, - R, - T, - E=..., - F=..., - perViewErrors=..., - flags: int | None = ..., - criteria=..., -) -> tuple[Incomplete, _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2, _R, _T, _E, _F, _perViewErrors]: ... - - -def stereoRectify( - cameraMatrix1, - distCoeffs1, - cameraMatrix2, - distCoeffs2, - imageSize, - R, - T, - R1=..., - R2=..., - P1=..., - P2=..., - Q=..., - flags: int | None = ..., - alpha=..., - newImageSize=..., -) -> tuple[_R1, _R2, _P1, _P2, _Q, _validPixROI1, _validPixROI2]: ... - - -def stereoRectifyUncalibrated( - points1, - points2, - F, - imgSize, - H1=..., - H2=..., - threshold=..., -) -> tuple[ - Incomplete, - _H1, - _H2, -]: ... - - -def stylization(src: Mat, dst: Mat = ..., sigma_s=..., sigma_r=...) -> _dst: ... 
-
-
-def subtract(
-    src1: Mat | _NumericScalar,
-    src2: Mat | _NumericScalar,
-    dst: Mat = ...,
-    mask: Mat = ...,
-    dtype=...,
-) -> _dst: ...
-
-
-def sumElems(src): ...
-
-
-def textureFlattening(
-    src: Mat,
-    mask: Mat,
-    dst: Mat = ...,
-    low_threshold=...,
-    high_threshold=...,
-    kernel_size=...,
-) -> _dst: ...
-
-
-def threshold(src: Mat, thresh, maxval, type, dst: Mat = ...) -> tuple[Incomplete, _dst]: ...
-def trace(mtx): ...
-def transform(src: Mat, m, dst: Mat = ...) -> _dst: ...
-def transpose(src: Mat, dst: Mat = ...) -> _dst: ...
-def triangulatePoints(projMatr1, projMatr2, projPoints1, projPoints2, points4D=...) -> _points4D: ...
-def undistort(src: Mat, cameraMatrix, distCoeffs, dst: Mat = ..., newCameraMatrix=...) -> _dst: ...
-def undistortPoints(src: Mat, cameraMatrix, distCoeffs, dst: Mat = ..., R=..., P=...) -> _dst: ...
-def undistortPointsIter(src: Mat, cameraMatrix, distCoeffs, R, P, criteria, dst: Mat = ...) -> _dst: ...
-def useOpenVX(): ...
-def useOptimized(): ...
-def validateDisparity(disparity, cost, minDisparity, numberOfDisparities, disp12MaxDisp=...) -> _disparity: ...
-def vconcat(src: Mat | Sequence[Mat], dst: Mat = ...) -> Mat: ...
-def waitKey(delay=...): ...
-def waitKeyEx(delay=...): ...
-
-
-def warpAffine(
-    src: Mat, M, dsize: _Size, _dst: Mat = ..., _flags: int | None = ..., _borderMode=..., _borderValue=...,
-) -> _dst: ...
-
-
-def warpPerspective(
-    src: Mat, M, dsize: _Size, _dst: Mat = ..., _flags: int | None = ..., _borderMode=..., _borderValue=...,
-) -> _dst: ...
-def warpPolar(src: Mat, dsize: _Size, _center, _maxRadius, _flags: int | None, _dst: Mat = ...) -> _dst: ...
-def watershed(image: Mat, markers) -> _markers: ...
-def writeOpticalFlow(path, flow): ...
diff --git a/typings/cv2/gapi/__init__.pyi b/typings/cv2/gapi/__init__.pyi
new file mode 100644
index 00000000..7b58de64
--- /dev/null
+++ b/typings/cv2/gapi/__init__.pyi
@@ -0,0 +1,91 @@
+class GOpaque:
+    def __new__(cls, argtype): ...
+
+    class Bool:
+        def __new__(self): ...
+
+    class Int:
+        def __new__(self): ...
+
+    class Double:
+        def __new__(self): ...
+
+    class Float:
+        def __new__(self): ...
+
+    class String:
+        def __new__(self): ...
+
+    class Point:
+        def __new__(self): ...
+
+    class Point2f:
+        def __new__(self): ...
+
+    class Point3f:
+        def __new__(self): ...
+
+    class Size:
+        def __new__(self): ...
+
+    class Rect:
+        def __new__(self): ...
+
+    class Prim:
+        def __new__(self): ...
+
+    class Any:
+        def __new__(self): ...
+
+
+class GArray:
+    def __new__(cls, argtype): ...
+
+    class Bool:
+        def __new__(self): ...
+
+    class Int:
+        def __new__(self): ...
+
+    class Double:
+        def __new__(self): ...
+
+    class Float:
+        def __new__(self): ...
+
+    class String:
+        def __new__(self): ...
+
+    class Point:
+        def __new__(self): ...
+
+    class Point2f:
+        def __new__(self): ...
+
+    class Point3f:
+        def __new__(self): ...
+
+    class Size:
+        def __new__(self): ...
+
+    class Rect:
+        def __new__(self): ...
+
+    class Scalar:
+        def __new__(self): ...
+
+    class MatLike:
+        def __new__(self): ...
+
+    class GMat:
+        def __new__(self): ...
+
+    class Prim:
+        def __new__(self): ...
+
+    class Any:
+        def __new__(self): ...
+
+
+def op(op_id, in_types, out_types): ...
+def kernel(op_cls): ...
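The gapi stubs above only declare the graph value types together with the `op` and `kernel` decorators. A rough sketch of the kind of code they are meant to annotate, following OpenCV's Python G-API tutorial, is shown below; the operation id "custom.count_nonzero", the class names, and the final `apply` call are illustrative assumptions, and the exact `cv.gapi.compile_args`/`cv.gapi.kernels` spelling can vary between OpenCV releases.

import cv2 as cv
import numpy as np


# Declare the operation: one GMat in, one GOpaque[int] out.
@cv.gapi.op("custom.count_nonzero", in_types=[cv.GMat], out_types=[cv.GOpaque.Int])
class GCountNonZero:
    @staticmethod
    def outMeta(desc):
        # Metadata describing the opaque (scalar-like) output.
        return cv.empty_gopaque_desc()


# Provide a CPU implementation for the operation declared above.
@cv.gapi.kernel(GCountNonZero)
class GCountNonZeroImpl:
    @staticmethod
    def run(mat):
        return int(cv.countNonZero(mat))


# Build a small graph that uses the custom operation, then run it once.
g_in = cv.GMat()
g_out = GCountNonZero.on(g_in)
graph = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))

frame = np.zeros((4, 4), dtype=np.uint8)
frame[0, 0] = 255
count = graph.apply(cv.gin(frame), args=cv.gapi.compile_args(cv.gapi.kernels(GCountNonZeroImpl)))
print(count)  # 1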
diff --git a/typings/cv2/mat_wrapper/__init__.pyi b/typings/cv2/mat_wrapper/__init__.pyi
index 15d667e9..a3acfde5 100644
--- a/typings/cv2/mat_wrapper/__init__.pyi
+++ b/typings/cv2/mat_wrapper/__init__.pyi
@@ -1,21 +1,15 @@
-from __future__ import annotations
-
-import numpy as np
-from typing_extensions import TypeAlias
-
-_Unused: TypeAlias = object
+from _typeshed import Unused
+from cv2.typing import _NDArray
 
 __all__: list[str] = []
 
-_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]]
-
-# TODO: Make Mat generic with int or float
+# TODO: Make MatLike generic with int or float
 
 
-class Mat(_NDArray):
+class MatLike(_NDArray):
     wrap_channels: bool | None
 
-    def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: _Unused) -> _NDArray: ...
+    def __new__(cls, arr: _NDArray, wrap_channels: bool = ..., **kwargs: Unused) -> _NDArray: ...
     def __init__(self, arr: _NDArray, wrap_channels: bool = ...) -> None: ...
     def __array_finalize__(self, obj: _NDArray | None) -> None: ...
 
diff --git a/typings/cv2/typing.pyi b/typings/cv2/typing.pyi
new file mode 100644
index 00000000..7f0cbd33
--- /dev/null
+++ b/typings/cv2/typing.pyi
@@ -0,0 +1,34 @@
+from collections.abc import Sequence
+
+import numpy as np
+from cv2.mat_wrapper import MatLike as WrappedMat
+from typing_extensions import TypeAlias
+
+_NDArray: TypeAlias = np.ndarray[float, np.dtype[np.generic]]
+MatLike: TypeAlias = WrappedMat | _NDArray
+
+# Convertible to boolean
+Boolean: TypeAlias = bool | int | None
+# "a scalar"
+NumericScalar: TypeAlias = float | bool | None
+# cv::Scalar
+Scalar: TypeAlias = MatLike | NumericScalar | Sequence[NumericScalar]
+# cv::TermCriteria
+TermCriteria: TypeAlias = tuple[int, int, float] | Sequence[float]
+# cv::Point
+Point: TypeAlias = tuple[int, int] | Sequence[int]
+# cv::Size
+Size: TypeAlias = tuple[int, int] | Sequence[int]
+# cv::Range
+Range: TypeAlias = tuple[int, int] | Sequence[int]
+# cv::Point2f
+Point2f: TypeAlias = tuple[float, float] | Sequence[float]
+# cv::Size2f
+SizeFloat: TypeAlias = tuple[float, float] | Sequence[float]
+# cv::Rect
+Rect: TypeAlias = tuple[int, int, int, int] | Sequence[int]
+# cv::Rect2f
+RectFloat: TypeAlias = tuple[float, float, float, float] | Sequence[float]
+# cv::RotatedRect
+RotatedRect: TypeAlias = tuple[Point2f, SizeFloat, float] | Sequence[Point2f | SizeFloat | float]
+RotatedRectResult: TypeAlias = tuple[tuple[float, float], tuple[float, float], float]
diff --git a/typings/multiprocessing/connection.pyi b/typings/multiprocessing/connection.pyi
new file mode 100644
index 00000000..1cc33506
--- /dev/null
+++ b/typings/multiprocessing/connection.pyi
@@ -0,0 +1,43 @@
+# https://github.com/python/typeshed/blob/main/stdlib/multiprocessing/connection.pyi
+import sys
+from types import TracebackType
+from typing import Any, Generic, SupportsIndex, TypeVar
+
+from _typeshed import ReadableBuffer
+from typing_extensions import Self
+
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+
+
+class _ConnectionBase(Generic[_T1, _T2]):
+    def __init__(self, handle: SupportsIndex, readable: bool = True, writable: bool = True) -> None: ...
+    @property
+    def closed(self) -> bool: ...  # undocumented
+    @property
+    def readable(self) -> bool: ...  # undocumented
+    @property
+    def writable(self) -> bool: ...  # undocumented
+    def fileno(self) -> int: ...
+    def close(self) -> None: ...
+    def send_bytes(self, buf: ReadableBuffer, offset: int = 0, size: int | None = None) -> None: ...
+    def send(self, obj: _T1) -> None: ...
+    def recv_bytes(self, maxlength: int | None = None) -> bytes: ...
+    def recv_bytes_into(self, buf: Any, offset: int = 0) -> int: ...
+    def recv(self) -> _T2: ...
+    def poll(self, timeout: float | None = 0.0) -> bool: ...
+    def __enter__(self) -> Self: ...
+
+    def __exit__(
+        self, exc_type: type[BaseException] | None, exc_value: BaseException | None, exc_tb: TracebackType | None,
+    ) -> None: ...
+
+
+class Connection(_ConnectionBase[_T1, _T2]): ...
+
+
+if sys.platform == "win32":
+    class PipeConnection(_ConnectionBase[_T1, _T2]): ...
+    def Pipe(duplex: bool = True) -> tuple[PipeConnection[_T1, _T2], PipeConnection[_T2, _T1]]: ...
+else:
+    def Pipe(duplex: bool = True) -> tuple[Connection[_T1, _T2], Connection[_T2, _T1]]: ...
diff --git a/typings/multiprocessing/py.typed b/typings/multiprocessing/py.typed
new file mode 100644
index 00000000..b648ac92
--- /dev/null
+++ b/typings/multiprocessing/py.typed
@@ -0,0 +1 @@
+partial
diff --git a/typings/multiprocessing/test_cases/check_pipe_connections.py b/typings/multiprocessing/test_cases/check_pipe_connections.py
new file mode 100644
index 00000000..5c55de0a
--- /dev/null
+++ b/typings/multiprocessing/test_cases/check_pipe_connections.py
@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from multiprocessing.connection import Pipe, PipeConnection
+
+# Less type-safe, but no extra variable. User could mix up send and recv types.
+a: PipeConnection[str, int]
+b: PipeConnection[int, str]
+a, b = Pipe()
+
+# More type-safe, but needs an extra variable
+connections_wrong: tuple[
+    PipeConnection[str, int], PipeConnection[str, int],
+] = Pipe()  # pyright: ignore[reportGeneralTypeIssues]
+connections_ok: tuple[PipeConnection[str, int], PipeConnection[int, str]] = Pipe()
+a, b = connections_ok
+
+a.send("test")
+a.send(0)  # pyright: ignore[reportGeneralTypeIssues]
+test: str = b.recv()
+test2: int = b.recv()  # pyright: ignore[reportGeneralTypeIssues]
+
+b.send("test")  # pyright: ignore[reportGeneralTypeIssues]
+b.send(0)
+test: str = a.recv()  # pyright: ignore[reportGeneralTypeIssues]
+test2: int = a.recv()
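The connection stub above deliberately makes `Connection`/`PipeConnection` generic over the sent type (`_T1`) and the received type (`_T2`), which is what the pyright test case exercises. A minimal sketch of how that typing is used across a real parent/worker pair follows; the `worker` function, the messages, and the `-1` stop sentinel are invented for illustration, and the `PipeConnection` annotations mirror the Windows-only branch of the stub (on other platforms the same code would be annotated with `Connection`).

from __future__ import annotations

from multiprocessing import Process
from multiprocessing.connection import Pipe
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # PipeConnection only exists on Windows; Connection is the cross-platform equivalent.
    from multiprocessing.connection import PipeConnection


def worker(conn: PipeConnection[str, int]) -> None:
    # The worker receives ints and answers with strs, stopping on a negative value.
    while (value := conn.recv()) >= 0:
        conn.send(f"got {value}")


if __name__ == "__main__":
    # Annotate the targets first, as in check_pipe_connections.py, so the
    # send/recv directions of each end are pinned down.
    parent_conn: PipeConnection[int, str]
    child_conn: PipeConnection[str, int]
    parent_conn, child_conn = Pipe()

    child = Process(target=worker, args=(child_conn,))
    child.start()
    parent_conn.send(10)
    print(parent_conn.recv())  # "got 10"
    parent_conn.send(-1)  # tell the worker to stop
    child.join()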