Update OpenVINO to PyTorch 2.6 and fix mismatched shapes error on too many resolution changes
Disty0 committed Feb 8, 2025
1 parent e578afc commit 1acbabb
Showing 6 changed files with 34 additions and 20 deletions.
4 changes: 2 additions & 2 deletions CHANGELOG.md
@@ -11,13 +11,13 @@
   - set in *settings -> user interface -> language*
   - [localization](https://github.com/vladmandic/wiki/Locale) documentation
 - **Torch**:
-  - for **zluda** set default to `torch==2.6.0+cu126`
+  - for **zluda** set default to `torch==2.6.0+cu118`
+  - for **openvino** set default to `torch==2.6.0+cpu` and `openvino==2025.0.0`
 - **Other**:
   - asymmetric tiling
     allows for configurable image tiling for x/y axis separately
     enable in *scripts -> asymmetric tiling*
     *note*: traditional symmetric tiling is achieved by setting circular mode for both x and y
-  - update openvino to `2025.0.0`
 - **UI**:
   - force browser cache-invalidate on page load
   - use correct timezone for log display
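The asymmetric tiling entry above describes making an image wrap seamlessly along the x and y axes independently. For orientation only, here is a minimal sketch of the usual circular-padding trick behind such tiling, not the extension's actual code; `asymmetric_circular_conv` is a hypothetical helper:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def asymmetric_circular_conv(conv: nn.Conv2d, tile_x: bool = True, tile_y: bool = False):
    """Hypothetical helper: forward pass that pads circularly only on the tiled axes."""
    pad_h, pad_w = conv.padding  # original (height, width) padding
    def forward(x):
        # F.pad takes (left, right, top, bottom); wrap each axis independently
        x = F.pad(x, (pad_w, pad_w, 0, 0), mode="circular" if tile_x else "constant")
        x = F.pad(x, (0, 0, pad_h, pad_h), mode="circular" if tile_y else "constant")
        return F.conv2d(x, conv.weight, conv.bias, conv.stride, padding=0,
                        dilation=conv.dilation, groups=conv.groups)
    return forward

conv = nn.Conv2d(3, 8, 3, padding=1)
out = asymmetric_circular_conv(conv, tile_x=True, tile_y=False)(torch.randn(1, 3, 64, 64))
```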
7 changes: 3 additions & 4 deletions installer.py
@@ -726,13 +726,12 @@ def install_ipex(torch_command):
 
 def install_openvino(torch_command):
     t_start = time.time()
-    # Python 3.12: RuntimeError: Dynamo is not supported on Python 3.12+
-    check_python(supported_minors=[9, 10, 11], reason='OpenVINO backend requires a Python version from 3.9, 3.10 or 3.11')
+    check_python(supported_minors=[9, 10, 11, 12], reason='OpenVINO backend requires a Python version between 3.9 and 3.12')
     log.info('OpenVINO: selected')
     if sys.platform == 'darwin':
-        torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.3.1 torchvision==0.18.1')
+        torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.6.0 torchvision==0.21.0')
     else:
-        torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.3.1+cpu torchvision==0.18.1+cpu --index-url https://download.pytorch.org/whl/cpu')
+        torch_command = os.environ.get('TORCH_COMMAND', 'torch==2.6.0+cpu torchvision==0.21.0+cpu --index-url https://download.pytorch.org/whl/cpu')
 
     install(os.environ.get('OPENVINO_COMMAND', 'openvino==2025.0.0'), 'openvino')
     install(os.environ.get('NNCF_COMMAND', 'nncf==2.15.0'), 'nncf')
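The relaxed check reflects that PyTorch 2.6 supports Dynamo on Python 3.12, which is why the old guard comment is deleted along with it. `check_python` is the installer's own helper; as a rough, assumed sketch of its semantics:

```python
import sys

def check_python(supported_minors, reason):
    # assumed behavior: refuse to install on an unsupported interpreter
    if sys.version_info.major != 3 or sys.version_info.minor not in supported_minors:
        raise RuntimeError(f'Incompatible Python {sys.version.split()[0]}: {reason}')
```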
@@ -42,7 +42,7 @@ def load_model(device, model_path, model_type="dpt_large_384", optimize=True, he
     network input
     """
     if "openvino" in model_type:
-        from openvino.runtime import Core
+        from openvino import Core
 
     keep_aspect_ratio = not square

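Same import migration as in the other files: recent OpenVINO releases expose the core API at the top level, and the `openvino.runtime` namespace is deprecated as of the 2025.0 packages. Usage is otherwise unchanged; for example (`model.xml` is a placeholder path):

```python
from openvino import Core  # was: from openvino.runtime import Core

core = Core()
print(core.available_devices)  # e.g. ['CPU', 'GPU']
compiled = core.compile_model("model.xml", device_name="CPU")  # placeholder model path
```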
26 changes: 18 additions & 8 deletions modules/intel/openvino/__init__.py
@@ -3,10 +3,10 @@
 import torch
 import nncf
 
-from openvino.frontend import FrontEndManager
-from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder
 from openvino.frontend.pytorch.torchdynamo.partition import Partitioner
-from openvino.runtime import Core, Type, PartialShape, serialize
+from openvino.frontend.pytorch.fx_decoder import TorchFXPythonDecoder
+from openvino.frontend import FrontEndManager
+from openvino import Core, Type, PartialShape, serialize
 from openvino.properties import hint as ov_hints
 
 from torch._dynamo.backends.common import fake_tensor_unsupported
@@ -23,6 +23,11 @@
 from modules import shared, devices, sd_models
 
 
+torch._dynamo.eval_frame.check_if_dynamo_supported = lambda: True # pylint: disable=protected-access
+if hasattr(torch._dynamo.config, "inline_inbuilt_nn_modules"):
+    torch._dynamo.config.inline_inbuilt_nn_modules = False # pylint: disable=protected-access
+
+
 DEFAULT_OPENVINO_PYTHON_CONFIG = MappingProxyType(
     {
         "use_python_fusion_cache": True,
@@ -114,9 +119,9 @@ def cached_model_name(model_hash_str, device, args, cache_root, reversed = False
     for input_data in args:
         if isinstance(input_data, torch.SymInt):
             if reversed:
-                inputs_str = "_" + "torch.SymInt1" + inputs_str
+                inputs_str = "_" + "torch.SymInt[]" + inputs_str
             else:
-                inputs_str += "_" + "torch.SymInt1"
+                inputs_str += "_" + "torch.SymInt[]"
         elif isinstance(input_data, int):
             pass
         else:
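This cache-key change pairs with the shape fix in the next hunk: a `torch.SymInt` stands for a scalar, so it is now recorded as a rank-0 shape (`[]`) rather than a rank-1 shape (`[1]`). Keying and compiling cached OpenVINO models with the wrong rank is what plausibly surfaced as the mismatched-shapes error after repeated resolution changes. For illustration:

```python
import torch

torch.Size([])   # rank-0: a scalar, matching what a SymInt represents
torch.Size([1])  # rank-1 with one element: a genuinely different shape to the cache
```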
@@ -176,7 +181,7 @@ def openvino_compile(gm: GraphModule, *example_inputs, model_hash_str: str = Non
     for input_data in example_inputs:
         if isinstance(input_data, torch.SymInt):
             input_types.append(torch.SymInt)
-            input_shapes.append(torch.Size([1]))
+            input_shapes.append(torch.Size([]))
         elif isinstance(input_data, int):
             pass
         else:
@@ -426,9 +431,8 @@ def get_subgraph_type(tensor):
     return tensor
 
 
-@register_backend
 @fake_tensor_unsupported
-def openvino_fx(subgraph, example_inputs):
+def openvino_fx(subgraph, example_inputs, options=None):
     global dont_use_4bit_nncf
     global dont_use_nncf
     global dont_use_quant
@@ -528,6 +532,8 @@ def _call(*args):
     for node in model.graph.nodes:
         if node.target == torch.ops.aten.mul_.Tensor:
             node.target = torch.ops.aten.mul.Tensor
+        elif node.target == torch.ops.aten._unsafe_index.Tensor:
+            node.target = torch.ops.aten.index.Tensor
     with devices.inference_context():
         model.eval()
         partitioner = Partitioner(options=None)
@@ -543,3 +549,7 @@ def _call(*args):
         res = execute(compiled_model, *args, executor="openvino", executor_parameters=executor_parameters, file_name=maybe_fs_cached_name)
         return res
     return _call
+
+
+if "openvino_fx" not in torch.compiler.list_backends():
+    register_backend(compiler_fn=openvino_fx, name="openvino_fx")
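Registering the backend imperatively instead of with the `@register_backend` decorator pairs with the new `options=None` parameter that `torch.compile` passes through to custom backends, and the `list_backends()` guard presumably avoids a duplicate-registration error if the module is imported twice. After this module is imported, the backend is selected by name; a minimal usage sketch, assuming the module is importable from a SD.Next checkout:

```python
import torch

import modules.intel.openvino  # importing this module registers the "openvino_fx" backend

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.ReLU())
compiled = torch.compile(model, backend="openvino_fx", dynamic=False)
print(compiled(torch.randn(1, 8)).shape)  # torch.Size([1, 8])
```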
7 changes: 4 additions & 3 deletions modules/sd_models_compile.py
@@ -63,7 +63,6 @@ def ipex_optimize_model(model, op=None, sd_model=None): # pylint: disable=unused
 def optimize_openvino(sd_model):
     try:
         from modules.intel.openvino import openvino_fx # pylint: disable=unused-import
-        torch._dynamo.eval_frame.check_if_dynamo_supported = lambda: True # pylint: disable=protected-access
         if shared.compiled_model_state is not None:
             shared.compiled_model_state.compiled_cache.clear()
             shared.compiled_model_state.req_cache.clear()
@@ -164,13 +163,15 @@ def torch_compile_model(model, op=None, sd_model=None): # pylint: disable=unused
         model = torch.compile(model.to(devices.device),
             mode=shared.opts.cuda_compile_mode,
             backend=shared.opts.cuda_compile_backend,
-            fullgraph=shared.opts.cuda_compile_fullgraph
+            fullgraph=shared.opts.cuda_compile_fullgraph,
+            dynamic=None if shared.opts.cuda_compile_backend != "openvino_fx" else False,
         ).to(return_device)
     else:
         model = torch.compile(model,
             mode=shared.opts.cuda_compile_mode,
             backend=shared.opts.cuda_compile_backend,
-            fullgraph=shared.opts.cuda_compile_fullgraph
+            fullgraph=shared.opts.cuda_compile_fullgraph,
+            dynamic=None if shared.opts.cuda_compile_backend != "openvino_fx" else False,
         )
     devices.torch_gc()
     return model
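The new `dynamic` argument is the behavioral core of the fix. With the default `None`, Dynamo compiles static first and then automatically switches to a dynamic-shape graph once it observes a size change, which is what appears to have tripped the OpenVINO backend after too many resolution changes; `False` instead forces a fresh static-shape graph for every distinct input shape. Roughly:

```python
import torch

def double(x):
    return x * 2

# dynamic=None (default): a second call with a new shape may recompile with symbolic sizes
# dynamic=False: each distinct input shape gets its own static-shape compilation
double_static = torch.compile(double, dynamic=False)
double_static(torch.randn(4, 64, 64))
double_static(torch.randn(4, 128, 128))  # recompiles statically instead of going dynamic
```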
8 changes: 6 additions & 2 deletions modules/upscaler.py
@@ -187,7 +187,6 @@ def compile_upscaler(model):
 
     if shared.opts.cuda_compile_backend == "openvino_fx":
         from modules.intel.openvino import openvino_fx # pylint: disable=unused-import
-        torch._dynamo.eval_frame.check_if_dynamo_supported = lambda: True # pylint: disable=protected-access
 
     log_level = logging.WARNING if shared.opts.cuda_compile_verbose else logging.CRITICAL # pylint: disable=protected-access
     if hasattr(torch, '_logging'):
@@ -206,7 +205,12 @@
             shared.log.error(f"Torch inductor config error: {e}")
 
     t0 = time.time()
-    model = torch.compile(model, mode=shared.opts.cuda_compile_mode, backend=shared.opts.cuda_compile_backend, fullgraph=shared.opts.cuda_compile_fullgraph) # pylint: disable=attribute-defined-outside-init
+    model = torch.compile(model,
+        mode=shared.opts.cuda_compile_mode,
+        backend=shared.opts.cuda_compile_backend,
+        fullgraph=shared.opts.cuda_compile_fullgraph,
+        dynamic=None if shared.opts.cuda_compile_backend != "openvino_fx" else False,
+    ) # pylint: disable=attribute-defined-outside-init
     setup_logging() # compile messes with logging so reset is needed
     t1 = time.time()
     shared.log.info(f"Upscaler compile: time={t1-t0:.2f}")