[uplift] Switch to new dylib structure for ttmlir (#1154) #468
590 tests run, 30 passed, 125 skipped, 435 failed. The annotations below all follow the same pattern: an element-wise comparison test (test_less, test_greater, test_not_equal, test_equal) compiles successfully, then fails at runtime inside run_binary with RuntimeError: Fatal error.
Annotations
Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_less[shape_x0-shape_y0]
RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 28, 28), shape_y = (1, 128, 28, 28)
    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_less(shape_x, shape_y):
        class Less(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return torch.less(x, y)

        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
        inputs = [x, y]
        framework_model = Less()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:77:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.compiled_graph_state.CompiledModel object at 0x7fb7f0794b50>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.55291, 0.95274, 0.03616],
[0.18523, 0.37342, 0.30510, ..., 0...e-01, 6.38527e-01],
[4.80435e-01, 5.63519e-01, 1.80081e-01, ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.55291, 0.95274, 0.03616],
[0.18523, 0.37342, 0.30510, ..., 0...e-01, 6.38527e-01],
[4.80435e-01, 5.63519e-01, 1.80081e-01, ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]])]
    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.

        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors

        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."

        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor

        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
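The NOTE inside `CompiledModel.__call__` above explains why the check is `assert param is our_tensor` (identity) rather than value equality: a trainable parameter must be literally the same `torch.Tensor` object on the framework side and the compiled side, so an in-place update from either the torch optimizer or the device runtime is visible to both. A minimal pure-PyTorch sketch of that premise; the `shared_params` dict here is a stand-in for the compiled graph state, not a Forge API:

```python
import torch
import torch.nn as nn

module = nn.Linear(4, 4)

# Stand-in for fwd_compiled_graph_state: hold the same tensor objects, not copies.
shared_params = {name: p for name, p in module.named_parameters()}

for name, param in module.named_parameters():
    if param.requires_grad:
        assert param is shared_params[name]  # identity, not just value equality

# An in-place update from either side is seen by both, because there is one tensor.
with torch.no_grad():
    module.weight.add_(1.0)
assert torch.equal(module.weight, shared_params["weight"])
```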
Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_less[shape_x2-shape_y2]
RuntimeError: Fatal error
Raw output
shape_x = (1, 256, 28, 28), shape_y = (1, 256, 28, 28)
    (source and traceback identical to test_less[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_less[shape_x4-shape_y4]
RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 56, 56), shape_y = (1, 128, 56, 56)
    (source and traceback identical to test_less[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_less[shape_x6-shape_y6]
RuntimeError: Fatal error
Raw output
shape_x = (1, 512, 7, 7), shape_y = (1, 512, 7, 7)
    (source and traceback identical to test_less[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater[shape_x0-shape_y0]
RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 28, 28), shape_y = (1, 128, 28, 28)
    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_greater(shape_x, shape_y):
        class Greater(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return torch.greater(x, y)

        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
        inputs = [x, y]
        framework_model = Greater()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:110:
    (failure path identical to test_less[shape_x0-shape_y0] above: verify → CompiledModel.__call__ → run_binary)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
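Every annotated case reduces to the three steps shown in the test bodies: build a small nn.Module, forge.compile it with sample inputs, and call verify. A standalone sketch of one failing case follows; the import locations for verify and VerifyConfig are assumptions inferred from the file paths in the tracebacks, and the exact module layout may differ:

```python
import torch
import torch.nn as nn

import forge
# Assumed import paths, based on forge/verify/verify.py in the traceback above.
from forge.verify.verify import verify
from forge.verify.config import VerifyConfig

class Greater(nn.Module):
    def forward(self, x, y):
        return torch.greater(x, y)

inputs = [torch.rand(1, 128, 28, 28), torch.rand(1, 128, 28, 28)]
framework_model = Greater()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)

# On the failing commit this raises RuntimeError: Fatal error inside run_binary.
verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
```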
Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater[shape_x2-shape_y2]
RuntimeError: Fatal error
Raw output
shape_x = (1, 256, 28, 28), shape_y = (1, 256, 28, 28)
    (source and traceback identical to test_greater[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater[shape_x4-shape_y4]
RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 56, 56), shape_y = (1, 128, 56, 56)
    (source and traceback identical to test_greater[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater[shape_x6-shape_y6]
RuntimeError: Fatal error
Raw output
shape_x = (1, 512, 7, 7), shape_y = (1, 512, 7, 7)
    (source and traceback identical to test_greater[shape_x0-shape_y0] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_not_equal[shape_x1-shape_y1]
RuntimeError: Fatal error
Raw output
shape_x = (1, 64, 28, 28), shape_y = (1, 64, 28, 28)
    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_not_equal(shape_x, shape_y):
        class NotEqual(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return torch.ne(x, y)

        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
        inputs = [x, y]
        framework_model = NotEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:143:
    (failure path identical to test_less[shape_x0-shape_y0] above: verify → CompiledModel.__call__ → run_binary)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
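Each of these tests passes VerifyConfig(verify_dtype=False) to verify. The torch goldens for all four comparison ops are torch.bool tensors, so dtype checking is presumably relaxed because the compiled output comes back in a different dtype; that reason is an inference from the flag, not something stated in this log. A quick check of the golden dtypes:

```python
import torch

x, y = torch.rand(2, 3), torch.rand(2, 3)
for op in (torch.less, torch.greater, torch.ne, torch.eq):
    out = op(x, y)
    assert out.dtype == torch.bool  # the torch golden for every comparison op is boolean
```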
Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_not_equal[shape_x3-shape_y3]
RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 14, 14), shape_y = (1, 128, 14, 14)
    (source and traceback identical to test_not_equal[shape_x1-shape_y1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape1]
RuntimeError: Fatal error
Raw output
shape = (1, 64, 28, 28)
    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()

            def forward(self, x, y):
                return torch.eq(x, y)

        x = torch.rand(shape)
        y = x * 2.0
        inputs = [x, y]
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192:
    (failure path identical to test_less[shape_x0-shape_y0] above: verify → CompiledModel.__call__ → run_binary)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
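Unlike the other three tests, test_equal derives its second input as y = x * 2.0, so the torch golden is True only where x == 0.0 (x equals 2x only at zero); with torch.rand inputs drawn from [0, 1), the golden is effectively all False. A small sketch of that golden, independent of Forge:

```python
import torch

x = torch.rand(1, 64, 28, 28)
y = x * 2.0
golden = torch.eq(x, y)     # True only where x == 0.0, since x == 2*x iff x == 0
print(golden.any().item())  # virtually always False for uniform [0, 1) samples
```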
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape3]
RuntimeError: Fatal error
Raw output
shape = (1, 128, 14, 14)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape9]
RuntimeError: Fatal error
Raw output
shape = (64, 28, 28)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape10]
RuntimeError: Fatal error
Raw output
shape = (256, 28, 28)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape12]
RuntimeError: Fatal error
Raw output
shape = (128, 56, 56)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape14]
RuntimeError: Fatal error
Raw output
shape = (512, 7, 7)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape16]
RuntimeError: Fatal error
Raw output
shape = (128, 28)
    (source and traceback identical to test_equal[shape1] above)
E   RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_equal[shape18]
RuntimeError: Fatal error
Raw output
shape = (256, 28)
@pytest.mark.parametrize(
"shape",
[
(1, 128, 28, 28),
(1, 64, 28, 28),
(1, 256, 28, 28),
(1, 128, 14, 14),
(1, 128, 56, 56),
(1, 32, 64, 64),
(1, 512, 7, 7),
(1, 32, 32, 32),
(128, 28, 28),
(64, 28, 28),
(256, 28, 28),
(128, 14, 14),
(128, 56, 56),
(32, 64, 64),
(512, 7, 7),
(32, 32, 32),
(128, 28),
(64, 28),
(256, 28),
(128, 14),
(128, 56),
(32, 64),
(512, 7),
(32, 32),
],
)
@pytest.mark.push
def test_equal(shape):
class Equal(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return torch.eq(x, y)
x = torch.rand(shape)
y = x * 2.0
inputs = [x, y]
framework_model = Equal()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.compiled_graph_state.CompiledModel object at 0x7fb78fce65c0>
inputs = (tensor([[0.49626, 0.76822, 0.08848, ..., 0.55291, 0.95274, 0.03616],
[0.18523, 0.37342, 0.30510, ..., 0.567...671, 1.77202, ..., 0.70970, 0.84718, 1.58853],
[0.90683, 1.14361, 1.76465, ..., 0.58573, 1.86955, 1.20645]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848, ..., 0.55291, 0.95274, 0.03616],
[0.18523, 0.37342, 0.30510, ..., 0.567...671, 1.77202, ..., 0.70970, 0.84718, 1.58853],
[0.90683, 1.14361, 1.76465, ..., 0.58573, 1.86955, 1.20645]])]
def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
"""
Run inference on the compiled model.
Parameters
----------
inputs: [Tensor, ...]
Input tensors
Returns
-------
List[Tensor]
Output tensors
"""
self.inputs = [*to_pt_tensors(inputs)]
inputs_and_parameters = [
*self.inputs,
*self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
*self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
]
assert all(
[isinstance(t, torch.Tensor) for t in inputs_and_parameters]
), "All inputs should be torch tensors by now."
if self.training() and isinstance(self.framework_module, PyTorchModule):
for name, param in self.framework_module.module.named_parameters():
if param.requires_grad:
our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
# NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
# module. This is because we want to be able to optimize the parameters both on the device
# (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
# the parameter value, the other side can see the change.
#
# This could change in the future, but for now ensure that our premise is correct.
assert param is our_tensor
logger.info(
f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
)
> all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
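Both Fatal-error cases above, shape16 = (128, 28) and shape18 = (256, 28), are 2-D inputs, and both die inside run_binary before any value comparison runs. For reference, this standalone sketch (no forge pieces) shows the eager behavior the test verifies against:

import torch

# With y = x * 2, torch.eq(x, y) can only be True where x == 0, so for
# uniform random inputs the golden output is almost surely all False and
# has dtype torch.bool, which is why the test passes verify_dtype=False.
x = torch.rand(256, 28)
y = x * 2.0
out = torch.eq(x, y)
print(out.dtype, out.any().item())  # torch.bool False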
Check failure on line 227 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater_equal[dims0]
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[ True, True, False, ..., False, False, True],
[ True, False, False, ..., False, False, False],
[False, False, True, ..., True, False, True],
...,
[ True, True, False, ..., False, True, True],
[False, False, False, ..., True, True, True],
[ True, True, False, ..., False, True, True]]]), compiled_model=tensor([[[1., 1., 0., ..., 0., 0., 1.],
[0., 1., 0., ..., 0., 1., 0.],
[1., 1., 0., ..., 1., 1., 0.],
...,
[0., 0., 1., ..., 0., 1., 0.],
[0., 0., 0., ..., 1., 1., 1.],
[0., 0., 1., ..., 0., 1., 1.]]])
Raw output
dims = (1, 32, 64)
@pytest.mark.parametrize("dims", [(1, 32, 64), (6, 33), (4, 16, 17)])
@pytest.mark.push
def test_greater_equal(dims):
class GreaterEqual(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return torch.greater_equal(a, b)
inputs = [torch.rand(dims), torch.rand(dims)]
framework_model = GreaterEqual()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:227:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[ True, True, False, ..., False, False, True],
[ True, False, False, ..., False, False, False],
... [False, False, False, ..., True, True, True],
[ True, True, False, ..., False, True, True]]])
co_out = tensor([[[1., 1., 0., ..., 0., 0., 1.],
[0., 1., 0., ..., 0., 1., 0.],
[1., 1., 0., ..., 1., 1., ... [0., 0., 1., ..., 0., 1., 0.],
[0., 0., 0., ..., 1., 1., 1.],
[0., 0., 1., ..., 0., 1., 1.]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[ True, True, False, ..., False, False, True],
E [ True, False, False, ..., False, False, False],
E [False, False, True, ..., True, False, True],
E ...,
E [ True, True, False, ..., False, True, True],
E [False, False, False, ..., True, True, True],
E [ True, True, False, ..., False, True, True]]]), compiled_model=tensor([[[1., 1., 0., ..., 0., 0., 1.],
E [0., 1., 0., ..., 0., 1., 0.],
E [1., 1., 0., ..., 1., 1., 0.],
E ...,
E [0., 0., 1., ..., 0., 1., 0.],
E [0., 0., 0., ..., 1., 1., 1.],
E [0., 0., 1., ..., 0., 1., 1.]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
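The dims0 mismatch is not merely bool-versus-float formatting: after casting the compiled 0/1 floats to bool, the truth values still disagree from the second row on. A minimal check in the spirit of compare_with_golden, using the leading columns of the tensors above (illustrative names, not the forge API):

import torch

fw_out = torch.tensor([[True, True, False], [True, False, False]])
co_out = torch.tensor([[1.0, 1.0, 0.0], [0.0, 1.0, 0.0]])

matches = fw_out == co_out.to(torch.bool)
print(matches.all().item())  # False: row 1 disagrees, a real data mismatch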
Check failure on line 227 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_greater_equal[dims2]
RuntimeError: Fatal error
Raw output
dims = (4, 16, 17)
@pytest.mark.parametrize("dims", [(1, 32, 64), (6, 33), (4, 16, 17)])
@pytest.mark.push
def test_greater_equal(dims):
class GreaterEqual(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return torch.greater_equal(a, b)
inputs = [torch.rand(dims), torch.rand(dims)]
framework_model = GreaterEqual()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:227:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.compiled_graph_state.CompiledModel object at 0x7fb7f08bbd30>
inputs = (tensor([[[0.49626, 0.76822, 0.08848, ..., 0.29389, 0.51852, 0.69767],
[0.80001, 0.16103, 0.28227, ..., 0.9...9, 0.25128, ..., 0.70247, 0.00238, 0.29352],
[0.36860, 0.86717, 0.48542, ..., 0.59949, 0.41904, 0.75899]]]))
inputs_and_parameters = [tensor([[[0.49626, 0.76822, 0.08848, ..., 0.29389, 0.51852, 0.69767],
[0.80001, 0.16103, 0.28227, ..., 0.9...9, 0.25128, ..., 0.70247, 0.00238, 0.29352],
[0.36860, 0.86717, 0.48542, ..., 0.59949, 0.41904, 0.75899]]])]
def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
"""
Run inference on the compiled model.
Parameters
----------
inputs: [Tensor, ...]
Input tensors
Returns
-------
List[Tensor]
Output tensors
"""
self.inputs = [*to_pt_tensors(inputs)]
inputs_and_parameters = [
*self.inputs,
*self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
*self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
]
assert all(
[isinstance(t, torch.Tensor) for t in inputs_and_parameters]
), "All inputs should be torch tensors by now."
if self.training() and isinstance(self.framework_module, PyTorchModule):
for name, param in self.framework_module.module.named_parameters():
if param.requires_grad:
our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
# NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
# module. This is because we want to be able to optimize the parameters both on the device
# (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
# the parameter value, the other side can see the change.
#
# This could change in the future, but for now ensure that our premise is correct.
assert param is our_tensor
logger.info(
f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
)
> all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E RuntimeError: Fatal error
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
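dims2 = (4, 16, 17) is the only shape here that crashes with a Fatal error rather than failing the value check; its last two dims are not multiples of the 32x32 tile granularity used across the TT stack, which is one plausible but unconfirmed factor. A quick alignment check (hypothetical helper, not a forge API):

def tile_aligned(shape, tile=32):
    # True when the last two dims are multiples of the tile size.
    return len(shape) >= 2 and shape[-2] % tile == 0 and shape[-1] % tile == 0

print(tile_aligned((1, 32, 64)))   # True  (fails above, but with a data mismatch)
print(tile_aligned((4, 16, 17)))   # False (the Fatal-error case)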
Check failure on line 268 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_multiply[shape0]
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[2.30743e-01, 2.13187e-02, 1.87313e-02, ..., 3.22881e-01, 1.84691e-02, 4.23863e-01],
[1.60184e-01, 1.87168e-01, 1.38811e-01, ..., 5.98106e-01, 1.46211e-01, 2.97836e-01],
[3.63389e-01, 8.14409e-02, 1.65667e-01, ..., 9.60350e-02, 3.18001e-04, 5.39877e-02],
...,
[8.22859e-01, 2.26655e-01, 3.01797e-01, ..., 4.67895e-02, 4.29371e-01, 3.21169e-01],
[1.09746e-01, 2.58656e-01, 7.23061e-01, ..., 5.23428e-01, 4.61224e-01, 4.98801e-01],
[2.67098e-01, 2.77467e-01, 1.18235e-01, ..., 1.10228e-01, 1.73405e-01, 2.09750e-01]]]), compiled_model=tensor([[[0.23074, 0.02132, 0.01873, ..., 0.21045, 0.50561, 0.35740],
[0.36339, 0.08144, 0.16567, ..., 0.54940, 0.10816, 0.20594],
[0.02591, 0.04576, 0.35191, ..., 0.22376, 0.03701, 0.30759],
...,
[0.17571, 0.39912, 0.33128, ..., 0.54841, 0.19968, 0.64131],
[0.41272, 0.44021, 0.06422, ..., 0.04679, 0.42937, 0.32117],
[0.58724, 0.55476, 0.00146, ..., 0.11023, 0.17341, 0.20975]]])
Raw output
shape = (1, 32, 32)
@pytest.mark.parametrize(
"shape",
[
(1, 32, 32),
(12, 8640),
],
)
@pytest.mark.push
def test_multiply(shape):
class Multiply(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return a * b
inputs = [torch.rand(shape), torch.rand(shape)]
framework_model = Multiply()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model)
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:268:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[2.30743e-01, 2.13187e-02, 1.87313e-02, ..., 3.22881e-01, 1.84691e-02, 4.23863e-01],
[1.60184e-01, ...224e-01, 4.98801e-01],
[2.67098e-01, 2.77467e-01, 1.18235e-01, ..., 1.10228e-01, 1.73405e-01, 2.09750e-01]]])
co_out = tensor([[[0.23074, 0.02132, 0.01873, ..., 0.21045, 0.50561, 0.35740],
[0.36339, 0.08144, 0.16567, ..., 0.54...21, 0.06422, ..., 0.04679, 0.42937, 0.32117],
[0.58724, 0.55476, 0.00146, ..., 0.11023, 0.17341, 0.20975]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[2.30743e-01, 2.13187e-02, 1.87313e-02, ..., 3.22881e-01, 1.84691e-02, 4.23863e-01],
E [1.60184e-01, 1.87168e-01, 1.38811e-01, ..., 5.98106e-01, 1.46211e-01, 2.97836e-01],
E [3.63389e-01, 8.14409e-02, 1.65667e-01, ..., 9.60350e-02, 3.18001e-04, 5.39877e-02],
E ...,
E [8.22859e-01, 2.26655e-01, 3.01797e-01, ..., 4.67895e-02, 4.29371e-01, 3.21169e-01],
E [1.09746e-01, 2.58656e-01, 7.23061e-01, ..., 5.23428e-01, 4.61224e-01, 4.98801e-01],
E [2.67098e-01, 2.77467e-01, 1.18235e-01, ..., 1.10228e-01, 1.73405e-01, 2.09750e-01]]]), compiled_model=tensor([[[0.23074, 0.02132, 0.01873, ..., 0.21045, 0.50561, 0.35740],
E [0.36339, 0.08144, 0.16567, ..., 0.54940, 0.10816, 0.20594],
E [0.02591, 0.04576, 0.35191, ..., 0.22376, 0.03701, 0.30759],
E ...,
E [0.17571, 0.39912, 0.33128, ..., 0.54841, 0.19968, 0.64131],
E [0.41272, 0.44021, 0.06422, ..., 0.04679, 0.42937, 0.32117],
E [0.58724, 0.55476, 0.00146, ..., 0.11023, 0.17341, 0.20975]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
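In the multiply mismatch above, the compiled tensor contains many of the golden values at shifted positions (e.g. the golden row starting 3.63389e-01, 8.14409e-02, 1.65667e-01 appears one row earlier in the compiled output), which points at an ordering/layout problem rather than numeric error. A Pearson-correlation check of the kind compare_with_golden performs would catch exactly this (sketch; the threshold is illustrative):

import torch

def pcc(a, b):
    a, b = a.flatten().float(), b.flatten().float()
    return torch.corrcoef(torch.stack([a, b]))[0, 1].item()

golden = torch.rand(1, 32, 32)
print(pcc(golden, golden) > 0.99)            # True: identical tensors
shuffled = golden.flatten()[torch.randperm(32 * 32)].view_as(golden)
print(pcc(golden, shuffled) > 0.99)          # almost surely False: same values, wrong order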
Check failure on line 285 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py
github-actions / TT-Forge-FE Tests
test_eltwise_binary.test_remainder
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[1.88596e-02, 5.70076e-02, 8.84774e-02, ..., 1.30106e-01, 3.05100e-01, 1.27435e-01],
[1.75910e-01, 1.04542e-01, 6.76298e-02, ..., 7.01080e-01, 2.03824e-01, 2.87809e-01],
[2.53406e-01, 4.36891e-01, 5.19091e-01, ..., 6.86956e-01, 5.13238e-03, 1.22614e-02],
...,
[2.86729e-02, 2.28457e-01, 4.16722e-01, ..., 5.07159e-02, 2.16366e-01, 5.22463e-01],
[1.42664e-01, 2.27151e-01, 1.48307e-01, ..., 7.65548e-02, 5.43109e-01, 9.31423e-02],
[2.81639e-01, 3.39219e-01, 3.03163e-01, ..., 1.87625e-01, 2.09853e-01, 7.20999e-01]],
[[4.64967e-01, 2.77507e-02, 2.42230e-02, ..., 1.33870e-01, 6.05347e-02, 4.54788e-01],
[9.10603e-01, 9.60773e-03, 1.33804e-02, ..., 5.36203e-04, 1.16995e-01, 4.57468e-01],
[4.69200e-01, 1.86410e-01, 3.19149e-01, ..., 1.39798e-01, 6.19599e-02, 2.38437e-02],
...,
[1.87770e-01, 3.45039e-01, 3.03499e-01, ..., 5.03118e-02, 1.27488e-01, 2.37691e-01],
[6.58247e-02, 2.62718e-02, 7.56917e-01, ..., 6.60533e-01, 5.90422e-02, 5.60275e-01],
[4.16766e-02, 4.50887e-02, 1.40979e-01, ..., 5.87491e-01, 2.07020e-01, 3.65602e-02]]]), compiled_model=tensor([[[0.01886, 0.05701, 0.08848, ..., 0.01443, 0.38450, 0.08262],
[0.25341, 0.43689, 0.51909, ..., 0.00099, 0.00172, 0.05949],
[0.32508, 0.09019, 0.10191, ..., 0.17845, 0.04877, 0.39342],
...,
[0.01224, 0.05818, 0.51817, ..., 0.14695, 0.28581, 0.06272],
[0.58068, 0.14964, 0.34369, ..., 0.05072, 0.21637, 0.52246],
[0.13339, 0.00690, 0.00253, ..., 0.18763, 0.20985, 0.72100]],
[[0.46497, 0.02775, 0.02422, ..., 0.04163, 0.62003, 0.13092],
[0.46920, 0.18641, 0.31915, ..., 0.21160, 0.00657, 0.25849],
[0.07972, 0.18487, 0.14125, ..., 0.00517, 0.04853, 0.37678],
...,
[0.00489, 0.15390, 0.63933, ..., 0.64385, 0.13482, 0.06960],
[0.07710, 0.13619, 0.02169, ..., 0.05031, 0.12749, 0.23769],
[0.17644, 0.08137, 0.57631, ..., 0.58749, 0.20702, 0.03656]]])
Raw output
@pytest.mark.push
def test_remainder():
class Remainder(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return a % b
inputs = [torch.rand(2, 32, 32), torch.rand(2, 32, 32)]
framework_model = Remainder()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model)
forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:285:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[1.88596e-02, 5.70076e-02, 8.84774e-02, ..., 1.30106e-01, 3.05100e-01, 1.27435e-01],
[1.75910e-01, ...422e-02, 5.60275e-01],
[4.16766e-02, 4.50887e-02, 1.40979e-01, ..., 5.87491e-01, 2.07020e-01, 3.65602e-02]]])
co_out = tensor([[[0.01886, 0.05701, 0.08848, ..., 0.01443, 0.38450, 0.08262],
[0.25341, 0.43689, 0.51909, ..., 0.00...19, 0.02169, ..., 0.05031, 0.12749, 0.23769],
[0.17644, 0.08137, 0.57631, ..., 0.58749, 0.20702, 0.03656]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[1.88596e-02, 5.70076e-02, 8.84774e-02, ..., 1.30106e-01, 3.05100e-01, 1.27435e-01],
E [1.75910e-01, 1.04542e-01, 6.76298e-02, ..., 7.01080e-01, 2.03824e-01, 2.87809e-01],
E [2.53406e-01, 4.36891e-01, 5.19091e-01, ..., 6.86956e-01, 5.13238e-03, 1.22614e-02],
E ...,
E [2.86729e-02, 2.28457e-01, 4.16722e-01, ..., 5.07159e-02, 2.16366e-01, 5.22463e-01],
E [1.42664e-01, 2.27151e-01, 1.48307e-01, ..., 7.65548e-02, 5.43109e-01, 9.31423e-02],
E [2.81639e-01, 3.39219e-01, 3.03163e-01, ..., 1.87625e-01, 2.09853e-01, 7.20999e-01]],
E
E [[4.64967e-01, 2.77507e-02, 2.42230e-02, ..., 1.33870e-01, 6.05347e-02, 4.54788e-01],
E [9.10603e-01, 9.60773e-03, 1.33804e-02, ..., 5.36203e-04, 1.16995e-01, 4.57468e-01],
E [4.69200e-01, 1.86410e-01, 3.19149e-01, ..., 1.39798e-01, 6.19599e-02, 2.38437e-02],
E ...,
E [1.87770e-01, 3.45039e-01, 3.03499e-01, ..., 5.03118e-02, 1.27488e-01, 2.37691e-01],
E [6.58247e-02, 2.62718e-02, 7.56917e-01, ..., 6.60533e-01, 5.90422e-02, 5.60275e-01],
E [4.16766e-02, 4.50887e-02, 1.40979e-01, ..., 5.87491e-01, 2.07020e-01, 3.65602e-02]]]), compiled_model=tensor([[[0.01886, 0.05701, 0.08848, ..., 0.01443, 0.38450, 0.08262],
E [0.25341, 0.43689, 0.51909, ..., 0.00099, 0.00172, 0.05949],
E [0.32508, 0.09019, 0.10191, ..., 0.17845, 0.04877, 0.39342],
E ...,
E [0.01224, 0.05818, 0.51817, ..., 0.14695, 0.28581, 0.06272],
E [0.58068, 0.14964, 0.34369, ..., 0.05072, 0.21637, 0.52246],
E [0.13339, 0.00690, 0.00253, ..., 0.18763, 0.20985, 0.72100]],
E
E [[0.46497, 0.02775, 0.02422, ..., 0.04163, 0.62003, 0.13092],
E [0.46920, 0.18641, 0.31915, ..., 0.21160, 0.00657, 0.25849],
E [0.07972, 0.18487, 0.14125, ..., 0.00517, 0.04853, 0.37678],
E ...,
E [0.00489, 0.15390, 0.63933, ..., 0.64385, 0.13482, 0.06960],
E [0.07710, 0.13619, 0.02169, ..., 0.05031, 0.12749, 0.23769],
E [0.17644, 0.08137, 0.57631, ..., 0.58749, 0.20702, 0.03656]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
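The remainder mismatch follows the same pattern: golden rows reappear in the compiled tensor shifted by one row (compare the golden row starting 2.53406e-01 with the second compiled row). For cross-checking a lowering locally, a % b on positive floats is torch.remainder and satisfies the floor-division identity (standalone sketch):

import torch

a, b = torch.rand(2, 32, 32), torch.rand(2, 32, 32)
out = a % b                                   # torch.remainder elementwise
print(bool(((out >= 0) & (out < b)).all()))   # expected True for b > 0
ref = a - torch.floor(a / b) * b              # the identity remainder obeys
print(torch.allclose(out, ref, atol=1e-6))    # expected True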
Check failure on line 74 in forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py
github-actions / TT-Forge-FE Tests
test_eltwise_nary.test_concat[1]
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
[0.17591, 0.26983, 0.15068, ..., 0.70108, 0.20382, 0.65105],
[0.77449, 0.43689, 0.51909, ..., 0.68696, 0.00513, 0.17565],
...,
[0.87865, 0.65690, 0.99439, ..., 0.22687, 0.66635, 0.52246],
[0.14266, 0.60759, 0.95527, ..., 0.79243, 0.54311, 0.89028],
[0.59370, 0.33922, 0.83867, ..., 0.18763, 0.20985, 0.72100]],
[[0.46497, 0.02775, 0.21171, ..., 0.86467, 0.06053, 0.45479],
[0.91060, 0.69364, 0.92123, ..., 0.85312, 0.71734, 0.45747],
[0.46920, 0.18641, 0.31915, ..., 0.13980, 0.06196, 0.30736],
...,
[0.93650, 0.34504, 0.30350, ..., 0.20624, 0.64436, 0.61472],
[0.76926, 0.42571, 0.75692, ..., 0.66053, 0.84923, 0.56027],
[0.44989, 0.81796, 0.14098, ..., 0.58749, 0.82632, 0.29092]],
[[0.86372, 0.44708, 0.29018, ..., 0.63317, 0.00213, 0.55566],
[0.25254, 0.18693, 0.29124, ..., 0.56491, 0.84059, 0.45782],
[0.04127, 0.67283, 0.37236, ..., 0.22374, 0.97360, 0.65411],
...,
[0.39305, 0.09317, 0.62254, ..., 0.00973, 0.92761, 0.82385],
[0.70674, 0.98937, 0.95153, ..., 0.01253, 0.07101, 0.18000],
[0.63998, 0.53875, 0.54839, ..., 0.66660, 0.75445, 0.55228]],
[[0.25216, 0.01999, 0.77026, ..., 0.70651, 0.53290, 0.03202],
[0.95468, 0.81882, 0.96093, ..., 0.73626, 0.88933, 0.54914],
[0.78162, 0.26386, 0.77911, ..., 0.78924, 0.71456, 0.27928],
...,
[0.94518, 0.64395, 0.13829, ..., 0.71353, 0.36638, 0.91316],
[0.93311, 0.99542, 0.66559, ..., 0.58988, 0.84228, 0.44822],
[0.81648, 0.89002, 0.10344, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.24331, 0.60709, 0.26819],
[0.30522, 0.16529, 0.08305, ..., 0.81413, 0.58980, 0.36324],
[0.52108, 0.94565, 0.55422, ..., 0.97859, 0.06696, 0.16339],
...,
[0.84998, 0.42844, 0.57767, ..., 0.05872, 0.44998, 0.66811],
[0.98240, 0.38044, 0.80696, ..., 0.08948, 0.69318, 0.19928],
[0.31206, 0.37515, 0.53550, ..., 0.65332, 0.69523, 0.77523]],
[[0.96680, 0.87950, 0.06249, ..., 0.36540, 0.57885, 0.54770],
[0.91077, 0.34202, 0.10087, ..., 0.01640, 0.60035, 0.97705],
[0.64389, 0.49734, 0.39635, ..., 0.73912, 0.62680, 0.28351],
...,
[0.74873, 0.36740, 0.87750, ..., 0.15593, 0.51687, 0.37703],
[0.23448, 0.04438, 0.91828, ..., 0.70514, 0.79019, 0.79197],
[0.40821, 0.77287, 0.23524, ..., 0.71737, 0.61930, 0.06359]],
[[0.30815, 0.78052, 0.73725, ..., 0.31383, 0.30019, 0.27556],
[0.37532, 0.59212, 0.21232, ..., 0.65175, 0.45164, 0.10243],
[0.66096, 0.50135, 0.16896, ..., 0.86458, 0.09104, 0.03321],
...,
[0.03972, 0.24895, 0.66610, ..., 0.68865, 0.19042, 0.33843],
[0.27013, 0.24829, 0.50712, ..., 0.59776, 0.20926, 0.73308],
[0.02588, 0.35485, 0.42359, ..., 0.29286, 0.93478, 0.60322]],
[[0.04608, 0.43269, 0.61249, ..., 0.70147, 0.92043, 0.43012],
[0.91142, 0.36865, 0.40018, ..., 0.37903, 0.41102, 0.79478],
[0.85051, 0.86100, 0.86220, ..., 0.37996, 0.69902, 0.40696],
...,
[0.07279, 0.03675, 0.83493, ..., 0.58213, 0.28210, 0.59853],
[0.31328, 0.66605, 0.56608, ..., 0.58852, 0.49105, 0.42558],
[0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
[0.77449, 0.43689, 0.51909, ..., 0.57507, 0.29523, 0.79669],
[0.32508, 0.09019, 0.39364, ..., 0.68108, 0.04877, 0.81635],
...,
[0.52301, 0.59785, 0.51817, ..., 0.85177, 0.28581, 0.73051],
[0.58068, 0.62078, 0.93923, ..., 0.22687, 0.66635, 0.52246],
[0.75544, 0.94095, 0.00253, ..., 0.18763, 0.20985, 0.72100]],
[[0.46497, 0.02775, 0.21171, ..., 0.86879, 0.62003, 0.45060],
[0.46920, 0.18641, 0.31915, ..., 0.95536, 0.36635, 0.25849],
[0.07972, 0.50737, 0.89399, ..., 0.32854, 0.75889, 0.37678],
...,
[0.33596, 0.66760, 0.63933, ..., 0.64385, 0.69866, 0.87790],
[0.71076, 0.70912, 0.06837, ..., 0.20624, 0.64436, 0.61472],
[0.77735, 0.58957, 0.57631, ..., 0.58749, 0.82632, 0.29092]],
[[0.86372, 0.44708, 0.29018, ..., 0.45814, 0.71867, 0.14801],
[0.04127, 0.67283, 0.37236, ..., 0.63795, 0.68100, 0.35817],
[0.72055, 0.89777, 0.97714, ..., 0.50473, 0.42064, 0.70811],
...,
[0.48486, 0.93278, 0.88529, ..., 0.04771, 0.74600, 0.38908],
[0.51226, 0.94264, 0.62312, ..., 0.00973, 0.92761, 0.82385],
[0.54863, 0.67255, 0.06350, ..., 0.66660, 0.75445, 0.55228]],
[[0.25216, 0.01999, 0.77026, ..., 0.05783, 0.36690, 0.84053],
[0.78162, 0.26386, 0.77911, ..., 0.37326, 0.59172, 0.26350],
[0.01291, 0.60111, 0.08612, ..., 0.86630, 0.98609, 0.97843],
...,
[0.97866, 0.95199, 0.84927, ..., 0.80780, 0.00904, 0.00848],
[0.19424, 0.88453, 0.68146, ..., 0.71353, 0.36638, 0.91316],
[0.88112, 0.75869, 0.18943, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.11390, 0.43097, 0.35527],
[0.52108, 0.94565, 0.55422, ..., 0.00238, 0.29352, 0.36860],
[0.80079, 0.41645, 0.29173, ..., 0.25131, 0.43844, 0.42293],
...,
[0.51077, 0.17989, 0.87972, ..., 0.70482, 0.55766, 0.66779],
[0.82478, 0.23557, 0.59554, ..., 0.05872, 0.44998, 0.66811],
[0.62205, 0.23351, 0.92696, ..., 0.65332, 0.69523, 0.77523]],
[[0.96680, 0.87950, 0.06249, ..., 0.41358, 0.95622, 0.31968],
[0.64389, 0.49734, 0.39635, ..., 0.74375, 0.17989, 0.62373],
[0.30778, 0.32249, 0.37637, ..., 0.01617, 0.14207, 0.83473],
...,
[0.04138, 0.25685, 0.83770, ..., 0.87596, 0.56384, 0.10104],
[0.07921, 0.57293, 0.04668, ..., 0.15593, 0.51687, 0.37703],
[0.30046, 0.16940, 0.65170, ..., 0.71737, 0.61930, 0.06359]],
[[0.30815, 0.78052, 0.73725, ..., 0.28251, 0.36017, 0.11348],
[0.66096, 0.50135, 0.16896, ..., 0.43931, 0.31940, 0.11459],
[0.70175, 0.02967, 0.01597, ..., 0.02762, 0.00981, 0.08400],
...,
[0.44431, 0.44570, 0.12421, ..., 0.77621, 0.52715, 0.00565],
[0.22623, 0.33509, 0.82225, ..., 0.68865, 0.19042, 0.33843],
[0.71139, 0.30818, 0.39749, ..., 0.29286, 0.93478, 0.60322]],
[[0.04608, 0.43269, 0.61249, ..., 0.72383, 0.02110, 0.42683],
[0.85051, 0.86100, 0.86220, ..., 0.15342, 0.60893, 0.50263],
[0.78417, 0.30762, 0.79736, ..., 0.31321, 0.80574, 0.86518],
...,
[0.24800, 0.38302, 0.69543, ..., 0.60396, 0.61377, 0.63255],
[0.05496, 0.12636, 0.19282, ..., 0.58213, 0.28210, 0.59853],
[0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
Raw output
inputs_and_dim = ((2, 2, 32, 32), (2, 2, 32, 32), 1)
@pytest.mark.parametrize(
"inputs_and_dim",
[
((2, 2, 32, 32), (2, 2, 32, 32), 0),
((2, 2, 32, 32), (2, 2, 32, 32), 1),
((2, 2, 32, 32), (2, 2, 32, 32), 2),
((2, 2, 32, 32), (2, 2, 32, 32), 3),
((2, 2, 32, 32), (2, 2, 32, 32), -1),
((2, 2, 32, 32), (2, 2, 32, 32), -2),
((2, 2, 32, 32), (2, 2, 32, 32), -3),
((2, 2, 32, 32), (2, 2, 32, 32), -4),
],
ids=["0", "1", "2", "3", "-1", "-2", "-3", "-4"],
)
@pytest.mark.push
def test_concat(inputs_and_dim):
in_shape1, in_shape2, dim = inputs_and_dim
class Concat(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return torch.cat((a, b), dim)
inputs = [torch.rand(in_shape1), torch.rand(in_shape2)]
framework_model = Concat()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model)
forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py:74:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
[0.17591, 0.26983, 0.15068, ..., 0...., 0.56608, ..., 0.58852, 0.49105, 0.42558],
[0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]])
co_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
[0.77449, 0.43689, 0.51909, ..., 0...., 0.19282, ..., 0.58213, 0.28210, 0.59853],
[0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
E [0.17591, 0.26983, 0.15068, ..., 0.70108, 0.20382, 0.65105],
E [0.77449, 0.43689, 0.51909, ..., 0.68696, 0.00513, 0.17565],
E ...,
E [0.87865, 0.65690, 0.99439, ..., 0.22687, 0.66635, 0.52246],
E [0.14266, 0.60759, 0.95527, ..., 0.79243, 0.54311, 0.89028],
E [0.59370, 0.33922, 0.83867, ..., 0.18763, 0.20985, 0.72100]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.86467, 0.06053, 0.45479],
E [0.91060, 0.69364, 0.92123, ..., 0.85312, 0.71734, 0.45747],
E [0.46920, 0.18641, 0.31915, ..., 0.13980, 0.06196, 0.30736],
E ...,
E [0.93650, 0.34504, 0.30350, ..., 0.20624, 0.64436, 0.61472],
E [0.76926, 0.42571, 0.75692, ..., 0.66053, 0.84923, 0.56027],
E [0.44989, 0.81796, 0.14098, ..., 0.58749, 0.82632, 0.29092]],
E
E [[0.86372, 0.44708, 0.29018, ..., 0.63317, 0.00213, 0.55566],
E [0.25254, 0.18693, 0.29124, ..., 0.56491, 0.84059, 0.45782],
E [0.04127, 0.67283, 0.37236, ..., 0.22374, 0.97360, 0.65411],
E ...,
E [0.39305, 0.09317, 0.62254, ..., 0.00973, 0.92761, 0.82385],
E [0.70674, 0.98937, 0.95153, ..., 0.01253, 0.07101, 0.18000],
E [0.63998, 0.53875, 0.54839, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.25216, 0.01999, 0.77026, ..., 0.70651, 0.53290, 0.03202],
E [0.95468, 0.81882, 0.96093, ..., 0.73626, 0.88933, 0.54914],
E [0.78162, 0.26386, 0.77911, ..., 0.78924, 0.71456, 0.27928],
E ...,
E [0.94518, 0.64395, 0.13829, ..., 0.71353, 0.36638, 0.91316],
E [0.93311, 0.99542, 0.66559, ..., 0.58988, 0.84228, 0.44822],
E [0.81648, 0.89002, 0.10344, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.24331, 0.60709, 0.26819],
E [0.30522, 0.16529, 0.08305, ..., 0.81413, 0.58980, 0.36324],
E [0.52108, 0.94565, 0.55422, ..., 0.97859, 0.06696, 0.16339],
E ...,
E [0.84998, 0.42844, 0.57767, ..., 0.05872, 0.44998, 0.66811],
E [0.98240, 0.38044, 0.80696, ..., 0.08948, 0.69318, 0.19928],
E [0.31206, 0.37515, 0.53550, ..., 0.65332, 0.69523, 0.77523]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.36540, 0.57885, 0.54770],
E [0.91077, 0.34202, 0.10087, ..., 0.01640, 0.60035, 0.97705],
E [0.64389, 0.49734, 0.39635, ..., 0.73912, 0.62680, 0.28351],
E ...,
E [0.74873, 0.36740, 0.87750, ..., 0.15593, 0.51687, 0.37703],
E [0.23448, 0.04438, 0.91828, ..., 0.70514, 0.79019, 0.79197],
E [0.40821, 0.77287, 0.23524, ..., 0.71737, 0.61930, 0.06359]],
E
E [[0.30815, 0.78052, 0.73725, ..., 0.31383, 0.30019, 0.27556],
E [0.37532, 0.59212, 0.21232, ..., 0.65175, 0.45164, 0.10243],
E [0.66096, 0.50135, 0.16896, ..., 0.86458, 0.09104, 0.03321],
E ...,
E [0.03972, 0.24895, 0.66610, ..., 0.68865, 0.19042, 0.33843],
E [0.27013, 0.24829, 0.50712, ..., 0.59776, 0.20926, 0.73308],
E [0.02588, 0.35485, 0.42359, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.04608, 0.43269, 0.61249, ..., 0.70147, 0.92043, 0.43012],
E [0.91142, 0.36865, 0.40018, ..., 0.37903, 0.41102, 0.79478],
E [0.85051, 0.86100, 0.86220, ..., 0.37996, 0.69902, 0.40696],
E ...,
E [0.07279, 0.03675, 0.83493, ..., 0.58213, 0.28210, 0.59853],
E [0.31328, 0.66605, 0.56608, ..., 0.58852, 0.49105, 0.42558],
E [0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
E [0.77449, 0.43689, 0.51909, ..., 0.57507, 0.29523, 0.79669],
E [0.32508, 0.09019, 0.39364, ..., 0.68108, 0.04877, 0.81635],
E ...,
E [0.52301, 0.59785, 0.51817, ..., 0.85177, 0.28581, 0.73051],
E [0.58068, 0.62078, 0.93923, ..., 0.22687, 0.66635, 0.52246],
E [0.75544, 0.94095, 0.00253, ..., 0.18763, 0.20985, 0.72100]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.86879, 0.62003, 0.45060],
E [0.46920, 0.18641, 0.31915, ..., 0.95536, 0.36635, 0.25849],
E [0.07972, 0.50737, 0.89399, ..., 0.32854, 0.75889, 0.37678],
E ...,
E [0.33596, 0.66760, 0.63933, ..., 0.64385, 0.69866, 0.87790],
E [0.71076, 0.70912, 0.06837, ..., 0.20624, 0.64436, 0.61472],
E [0.77735, 0.58957, 0.57631, ..., 0.58749, 0.82632, 0.29092]],
E
E [[0.86372, 0.44708, 0.29018, ..., 0.45814, 0.71867, 0.14801],
E [0.04127, 0.67283, 0.37236, ..., 0.63795, 0.68100, 0.35817],
E [0.72055, 0.89777, 0.97714, ..., 0.50473, 0.42064, 0.70811],
E ...,
E [0.48486, 0.93278, 0.88529, ..., 0.04771, 0.74600, 0.38908],
E [0.51226, 0.94264, 0.62312, ..., 0.00973, 0.92761, 0.82385],
E [0.54863, 0.67255, 0.06350, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.25216, 0.01999, 0.77026, ..., 0.05783, 0.36690, 0.84053],
E [0.78162, 0.26386, 0.77911, ..., 0.37326, 0.59172, 0.26350],
E [0.01291, 0.60111, 0.08612, ..., 0.86630, 0.98609, 0.97843],
E ...,
E [0.97866, 0.95199, 0.84927, ..., 0.80780, 0.00904, 0.00848],
E [0.19424, 0.88453, 0.68146, ..., 0.71353, 0.36638, 0.91316],
E [0.88112, 0.75869, 0.18943, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.11390, 0.43097, 0.35527],
E [0.52108, 0.94565, 0.55422, ..., 0.00238, 0.29352, 0.36860],
E [0.80079, 0.41645, 0.29173, ..., 0.25131, 0.43844, 0.42293],
E ...,
E [0.51077, 0.17989, 0.87972, ..., 0.70482, 0.55766, 0.66779],
E [0.82478, 0.23557, 0.59554, ..., 0.05872, 0.44998, 0.66811],
E [0.62205, 0.23351, 0.92696, ..., 0.65332, 0.69523, 0.77523]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.41358, 0.95622, 0.31968],
E [0.64389, 0.49734, 0.39635, ..., 0.74375, 0.17989, 0.62373],
E [0.30778, 0.32249, 0.37637, ..., 0.01617, 0.14207, 0.83473],
E ...,
E [0.04138, 0.25685, 0.83770, ..., 0.87596, 0.56384, 0.10104],
E [0.07921, 0.57293, 0.04668, ..., 0.15593, 0.51687, 0.37703],
E [0.30046, 0.16940, 0.65170, ..., 0.71737, 0.61930, 0.06359]],
E
E [[0.30815, 0.78052, 0.73725, ..., 0.28251, 0.36017, 0.11348],
E [0.66096, 0.50135, 0.16896, ..., 0.43931, 0.31940, 0.11459],
E [0.70175, 0.02967, 0.01597, ..., 0.02762, 0.00981, 0.08400],
E ...,
E [0.44431, 0.44570, 0.12421, ..., 0.77621, 0.52715, 0.00565],
E [0.22623, 0.33509, 0.82225, ..., 0.68865, 0.19042, 0.33843],
E [0.71139, 0.30818, 0.39749, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.04608, 0.43269, 0.61249, ..., 0.72383, 0.02110, 0.42683],
E [0.85051, 0.86100, 0.86220, ..., 0.15342, 0.60893, 0.50263],
E [0.78417, 0.30762, 0.79736, ..., 0.31321, 0.80574, 0.86518],
E ...,
E [0.24800, 0.38302, 0.69543, ..., 0.60396, 0.61377, 0.63255],
E [0.05496, 0.12636, 0.19282, ..., 0.58213, 0.28210, 0.59853],
E [0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
Check failure on line 74 in forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py
github-actions / TT-Forge-FE Tests
test_eltwise_nary.test_concat[3]
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.63317, 0.00213, 0.55566],
[0.17591, 0.26983, 0.15068, ..., 0.56491, 0.84059, 0.45782],
[0.77449, 0.43689, 0.51909, ..., 0.22374, 0.97360, 0.65411],
...,
[0.87865, 0.65690, 0.99439, ..., 0.00973, 0.92761, 0.82385],
[0.14266, 0.60759, 0.95527, ..., 0.01253, 0.07101, 0.18000],
[0.59370, 0.33922, 0.83867, ..., 0.66660, 0.75445, 0.55228]],
[[0.46497, 0.02775, 0.21171, ..., 0.70651, 0.53290, 0.03202],
[0.91060, 0.69364, 0.92123, ..., 0.73626, 0.88933, 0.54914],
[0.46920, 0.18641, 0.31915, ..., 0.78924, 0.71456, 0.27928],
...,
[0.93650, 0.34504, 0.30350, ..., 0.71353, 0.36638, 0.91316],
[0.76926, 0.42571, 0.75692, ..., 0.58988, 0.84228, 0.44822],
[0.44989, 0.81796, 0.14098, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.31383, 0.30019, 0.27556],
[0.30522, 0.16529, 0.08305, ..., 0.65175, 0.45164, 0.10243],
[0.52108, 0.94565, 0.55422, ..., 0.86458, 0.09104, 0.03321],
...,
[0.84998, 0.42844, 0.57767, ..., 0.68865, 0.19042, 0.33843],
[0.98240, 0.38044, 0.80696, ..., 0.59776, 0.20926, 0.73308],
[0.31206, 0.37515, 0.53550, ..., 0.29286, 0.93478, 0.60322]],
[[0.96680, 0.87950, 0.06249, ..., 0.70147, 0.92043, 0.43012],
[0.91077, 0.34202, 0.10087, ..., 0.37903, 0.41102, 0.79478],
[0.64389, 0.49734, 0.39635, ..., 0.37996, 0.69902, 0.40696],
...,
[0.74873, 0.36740, 0.87750, ..., 0.58213, 0.28210, 0.59853],
[0.23448, 0.04438, 0.91828, ..., 0.58852, 0.49105, 0.42558],
[0.40821, 0.77287, 0.23524, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.57507, 0.29523, 0.79669],
[0.32508, 0.09019, 0.39364, ..., 0.14655, 0.28809, 0.64706],
[0.00443, 0.72570, 0.25987, ..., 0.96911, 0.90056, 0.05348],
...,
[0.78539, 0.42285, 0.46502, ..., 0.69568, 0.98749, 0.40908],
[0.57781, 0.06044, 0.13575, ..., 0.04771, 0.74600, 0.38908],
[0.51226, 0.94264, 0.62312, ..., 0.66660, 0.75445, 0.55228]],
[[0.46497, 0.02775, 0.21171, ..., 0.95536, 0.36635, 0.25849],
[0.07972, 0.50737, 0.89399, ..., 0.55231, 0.92684, 0.13635],
[0.12650, 0.44209, 0.57301, ..., 0.85777, 0.46557, 0.97090],
...,
[0.28385, 0.05734, 0.64000, ..., 0.20046, 0.71333, 0.49261],
[0.20087, 0.84702, 0.14897, ..., 0.80780, 0.00904, 0.00848],
[0.19424, 0.88453, 0.68146, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.00238, 0.29352, 0.36860],
[0.80079, 0.41645, 0.29173, ..., 0.11632, 0.03397, 0.68710],
[0.02625, 0.05199, 0.78951, ..., 0.30858, 0.69761, 0.05672],
...,
[0.96961, 0.50332, 0.81668, ..., 0.11631, 0.98586, 0.39938],
[0.98270, 0.41026, 0.04981, ..., 0.77621, 0.52715, 0.00565],
[0.22623, 0.33509, 0.82225, ..., 0.29286, 0.93478, 0.60322]],
[[0.96680, 0.87950, 0.06249, ..., 0.74375, 0.17989, 0.62373],
[0.30778, 0.32249, 0.37637, ..., 0.94811, 0.40225, 0.96863],
[0.12963, 0.14516, 0.73509, ..., 0.12775, 0.16675, 0.00557],
...,
[0.35062, 0.67400, 0.59600, ..., 0.21123, 0.96146, 0.79786],
[0.62134, 0.93015, 0.79433, ..., 0.60396, 0.61377, 0.63255],
[0.05496, 0.12636, 0.19282, ..., 0.78986, 0.69613, 0.51850]]]])
Raw output
inputs_and_dim = ((2, 2, 32, 32), (2, 2, 32, 32), 3)
@pytest.mark.parametrize(
"inputs_and_dim",
[
((2, 2, 32, 32), (2, 2, 32, 32), 0),
((2, 2, 32, 32), (2, 2, 32, 32), 1),
((2, 2, 32, 32), (2, 2, 32, 32), 2),
((2, 2, 32, 32), (2, 2, 32, 32), 3),
((2, 2, 32, 32), (2, 2, 32, 32), -1),
((2, 2, 32, 32), (2, 2, 32, 32), -2),
((2, 2, 32, 32), (2, 2, 32, 32), -3),
((2, 2, 32, 32), (2, 2, 32, 32), -4),
],
ids=["0", "1", "2", "3", "-1", "-2", "-3", "-4"],
)
@pytest.mark.push
def test_concat(inputs_and_dim):
in_shape1, in_shape2, dim = inputs_and_dim
class Concat(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return torch.cat((a, b), dim)
inputs = [torch.rand(in_shape1), torch.rand(in_shape2)]
framework_model = Concat()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model)
forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py:74:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.63317, 0.00213, 0.55566],
[0.17591, 0.26983, 0.15068, ..., 0...., 0.91828, ..., 0.58852, 0.49105, 0.42558],
[0.40821, 0.77287, 0.23524, ..., 0.78986, 0.69613, 0.51850]]]])
co_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.57507, 0.29523, 0.79669],
[0.32508, 0.09019, 0.39364, ..., 0...., 0.79433, ..., 0.60396, 0.61377, 0.63255],
[0.05496, 0.12636, 0.19282, ..., 0.78986, 0.69613, 0.51850]]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.63317, 0.00213, 0.55566],
E [0.17591, 0.26983, 0.15068, ..., 0.56491, 0.84059, 0.45782],
E [0.77449, 0.43689, 0.51909, ..., 0.22374, 0.97360, 0.65411],
E ...,
E [0.87865, 0.65690, 0.99439, ..., 0.00973, 0.92761, 0.82385],
E [0.14266, 0.60759, 0.95527, ..., 0.01253, 0.07101, 0.18000],
E [0.59370, 0.33922, 0.83867, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.70651, 0.53290, 0.03202],
E [0.91060, 0.69364, 0.92123, ..., 0.73626, 0.88933, 0.54914],
E [0.46920, 0.18641, 0.31915, ..., 0.78924, 0.71456, 0.27928],
E ...,
E [0.93650, 0.34504, 0.30350, ..., 0.71353, 0.36638, 0.91316],
E [0.76926, 0.42571, 0.75692, ..., 0.58988, 0.84228, 0.44822],
E [0.44989, 0.81796, 0.14098, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.31383, 0.30019, 0.27556],
E [0.30522, 0.16529, 0.08305, ..., 0.65175, 0.45164, 0.10243],
E [0.52108, 0.94565, 0.55422, ..., 0.86458, 0.09104, 0.03321],
E ...,
E [0.84998, 0.42844, 0.57767, ..., 0.68865, 0.19042, 0.33843],
E [0.98240, 0.38044, 0.80696, ..., 0.59776, 0.20926, 0.73308],
E [0.31206, 0.37515, 0.53550, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.70147, 0.92043, 0.43012],
E [0.91077, 0.34202, 0.10087, ..., 0.37903, 0.41102, 0.79478],
E [0.64389, 0.49734, 0.39635, ..., 0.37996, 0.69902, 0.40696],
E ...,
E [0.74873, 0.36740, 0.87750, ..., 0.58213, 0.28210, 0.59853],
E [0.23448, 0.04438, 0.91828, ..., 0.58852, 0.49105, 0.42558],
E [0.40821, 0.77287, 0.23524, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.57507, 0.29523, 0.79669],
E [0.32508, 0.09019, 0.39364, ..., 0.14655, 0.28809, 0.64706],
E [0.00443, 0.72570, 0.25987, ..., 0.96911, 0.90056, 0.05348],
E ...,
E [0.78539, 0.42285, 0.46502, ..., 0.69568, 0.98749, 0.40908],
E [0.57781, 0.06044, 0.13575, ..., 0.04771, 0.74600, 0.38908],
E [0.51226, 0.94264, 0.62312, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.95536, 0.36635, 0.25849],
E [0.07972, 0.50737, 0.89399, ..., 0.55231, 0.92684, 0.13635],
E [0.12650, 0.44209, 0.57301, ..., 0.85777, 0.46557, 0.97090],
E ...,
E [0.28385, 0.05734, 0.64000, ..., 0.20046, 0.71333, 0.49261],
E [0.20087, 0.84702, 0.14897, ..., 0.80780, 0.00904, 0.00848],
E [0.19424, 0.88453, 0.68146, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.00238, 0.29352, 0.36860],
E [0.80079, 0.41645, 0.29173, ..., 0.11632, 0.03397, 0.68710],
E [0.02625, 0.05199, 0.78951, ..., 0.30858, 0.69761, 0.05672],
E ...,
E [0.96961, 0.50332, 0.81668, ..., 0.11631, 0.98586, 0.39938],
E [0.98270, 0.41026, 0.04981, ..., 0.77621, 0.52715, 0.00565],
E [0.22623, 0.33509, 0.82225, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.74375, 0.17989, 0.62373],
E [0.30778, 0.32249, 0.37637, ..., 0.94811, 0.40225, 0.96863],
E [0.12963, 0.14516, 0.73509, ..., 0.12775, 0.16675, 0.00557],
E ...,
E [0.35062, 0.67400, 0.59600, ..., 0.21123, 0.96146, 0.79786],
E [0.62134, 0.93015, 0.79433, ..., 0.60396, 0.61377, 0.63255],
E [0.05496, 0.12636, 0.19282, ..., 0.78986, 0.69613, 0.51850]]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
Check failure on line 74 in forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py
github-actions / TT-Forge-FE Tests
test_eltwise_nary.test_concat[-2]
ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
[0.17591, 0.26983, 0.15068, ..., 0.70108, 0.20382, 0.65105],
[0.77449, 0.43689, 0.51909, ..., 0.68696, 0.00513, 0.17565],
...,
[0.39305, 0.09317, 0.62254, ..., 0.00973, 0.92761, 0.82385],
[0.70674, 0.98937, 0.95153, ..., 0.01253, 0.07101, 0.18000],
[0.63998, 0.53875, 0.54839, ..., 0.66660, 0.75445, 0.55228]],
[[0.46497, 0.02775, 0.21171, ..., 0.86467, 0.06053, 0.45479],
[0.91060, 0.69364, 0.92123, ..., 0.85312, 0.71734, 0.45747],
[0.46920, 0.18641, 0.31915, ..., 0.13980, 0.06196, 0.30736],
...,
[0.94518, 0.64395, 0.13829, ..., 0.71353, 0.36638, 0.91316],
[0.93311, 0.99542, 0.66559, ..., 0.58988, 0.84228, 0.44822],
[0.81648, 0.89002, 0.10344, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.24331, 0.60709, 0.26819],
[0.30522, 0.16529, 0.08305, ..., 0.81413, 0.58980, 0.36324],
[0.52108, 0.94565, 0.55422, ..., 0.97859, 0.06696, 0.16339],
...,
[0.03972, 0.24895, 0.66610, ..., 0.68865, 0.19042, 0.33843],
[0.27013, 0.24829, 0.50712, ..., 0.59776, 0.20926, 0.73308],
[0.02588, 0.35485, 0.42359, ..., 0.29286, 0.93478, 0.60322]],
[[0.96680, 0.87950, 0.06249, ..., 0.36540, 0.57885, 0.54770],
[0.91077, 0.34202, 0.10087, ..., 0.01640, 0.60035, 0.97705],
[0.64389, 0.49734, 0.39635, ..., 0.73912, 0.62680, 0.28351],
...,
[0.07279, 0.03675, 0.83493, ..., 0.58213, 0.28210, 0.59853],
[0.31328, 0.66605, 0.56608, ..., 0.58852, 0.49105, 0.42558],
[0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
[0.77449, 0.43689, 0.51909, ..., 0.57507, 0.29523, 0.79669],
[0.32508, 0.09019, 0.39364, ..., 0.68108, 0.04877, 0.81635],
...,
[0.48486, 0.93278, 0.88529, ..., 0.04771, 0.74600, 0.38908],
[0.51226, 0.94264, 0.62312, ..., 0.00973, 0.92761, 0.82385],
[0.54863, 0.67255, 0.06350, ..., 0.66660, 0.75445, 0.55228]],
[[0.46497, 0.02775, 0.21171, ..., 0.86879, 0.62003, 0.45060],
[0.46920, 0.18641, 0.31915, ..., 0.95536, 0.36635, 0.25849],
[0.07972, 0.50737, 0.89399, ..., 0.32854, 0.75889, 0.37678],
...,
[0.97866, 0.95199, 0.84927, ..., 0.80780, 0.00904, 0.00848],
[0.19424, 0.88453, 0.68146, ..., 0.71353, 0.36638, 0.91316],
[0.88112, 0.75869, 0.18943, ..., 0.08316, 0.94820, 0.05317]]],
[[[0.23870, 0.35561, 0.17639, ..., 0.11390, 0.43097, 0.35527],
[0.52108, 0.94565, 0.55422, ..., 0.00238, 0.29352, 0.36860],
[0.80079, 0.41645, 0.29173, ..., 0.25131, 0.43844, 0.42293],
...,
[0.44431, 0.44570, 0.12421, ..., 0.77621, 0.52715, 0.00565],
[0.22623, 0.33509, 0.82225, ..., 0.68865, 0.19042, 0.33843],
[0.71139, 0.30818, 0.39749, ..., 0.29286, 0.93478, 0.60322]],
[[0.96680, 0.87950, 0.06249, ..., 0.41358, 0.95622, 0.31968],
[0.64389, 0.49734, 0.39635, ..., 0.74375, 0.17989, 0.62373],
[0.30778, 0.32249, 0.37637, ..., 0.01617, 0.14207, 0.83473],
...,
[0.24800, 0.38302, 0.69543, ..., 0.60396, 0.61377, 0.63255],
[0.05496, 0.12636, 0.19282, ..., 0.58213, 0.28210, 0.59853],
[0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
Raw output
inputs_and_dim = ((2, 2, 32, 32), (2, 2, 32, 32), -2)
@pytest.mark.parametrize(
"inputs_and_dim",
[
((2, 2, 32, 32), (2, 2, 32, 32), 0),
((2, 2, 32, 32), (2, 2, 32, 32), 1),
((2, 2, 32, 32), (2, 2, 32, 32), 2),
((2, 2, 32, 32), (2, 2, 32, 32), 3),
((2, 2, 32, 32), (2, 2, 32, 32), -1),
((2, 2, 32, 32), (2, 2, 32, 32), -2),
((2, 2, 32, 32), (2, 2, 32, 32), -3),
((2, 2, 32, 32), (2, 2, 32, 32), -4),
],
ids=["0", "1", "2", "3", "-1", "-2", "-3", "-4"],
)
@pytest.mark.push
def test_concat(inputs_and_dim):
in_shape1, in_shape2, dim = inputs_and_dim
class Concat(nn.Module):
def __init__(self):
super().__init__()
def forward(self, a, b):
return torch.cat((a, b), dim)
inputs = [torch.rand(in_shape1), torch.rand(in_shape2)]
framework_model = Concat()
compiled_model = forge.compile(framework_model, sample_inputs=inputs)
> verify(inputs, framework_model, compiled_model)
forge/test/mlir/operators/eltwise_nary/test_eltwise_nary.py:74:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb829046c50>
fw_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
[0.17591, 0.26983, 0.15068, ..., 0...., 0.56608, ..., 0.58852, 0.49105, 0.42558],
[0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]])
co_out = tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
[0.77449, 0.43689, 0.51909, ..., 0...., 0.19282, ..., 0.58213, 0.28210, 0.59853],
[0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
def check(self, fw_out, co_out):
if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
> raise ValueError(
f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
)
E ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.37342, 0.30510, 0.93200],
E [0.17591, 0.26983, 0.15068, ..., 0.70108, 0.20382, 0.65105],
E [0.77449, 0.43689, 0.51909, ..., 0.68696, 0.00513, 0.17565],
E ...,
E [0.39305, 0.09317, 0.62254, ..., 0.00973, 0.92761, 0.82385],
E [0.70674, 0.98937, 0.95153, ..., 0.01253, 0.07101, 0.18000],
E [0.63998, 0.53875, 0.54839, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.86467, 0.06053, 0.45479],
E [0.91060, 0.69364, 0.92123, ..., 0.85312, 0.71734, 0.45747],
E [0.46920, 0.18641, 0.31915, ..., 0.13980, 0.06196, 0.30736],
E ...,
E [0.94518, 0.64395, 0.13829, ..., 0.71353, 0.36638, 0.91316],
E [0.93311, 0.99542, 0.66559, ..., 0.58988, 0.84228, 0.44822],
E [0.81648, 0.89002, 0.10344, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.24331, 0.60709, 0.26819],
E [0.30522, 0.16529, 0.08305, ..., 0.81413, 0.58980, 0.36324],
E [0.52108, 0.94565, 0.55422, ..., 0.97859, 0.06696, 0.16339],
E ...,
E [0.03972, 0.24895, 0.66610, ..., 0.68865, 0.19042, 0.33843],
E [0.27013, 0.24829, 0.50712, ..., 0.59776, 0.20926, 0.73308],
E [0.02588, 0.35485, 0.42359, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.36540, 0.57885, 0.54770],
E [0.91077, 0.34202, 0.10087, ..., 0.01640, 0.60035, 0.97705],
E [0.64389, 0.49734, 0.39635, ..., 0.73912, 0.62680, 0.28351],
E ...,
E [0.07279, 0.03675, 0.83493, ..., 0.58213, 0.28210, 0.59853],
E [0.31328, 0.66605, 0.56608, ..., 0.58852, 0.49105, 0.42558],
E [0.09741, 0.48257, 0.51470, ..., 0.78986, 0.69613, 0.51850]]]]), compiled_model=tensor([[[[0.49626, 0.76822, 0.08848, ..., 0.24224, 0.81547, 0.79316],
E [0.77449, 0.43689, 0.51909, ..., 0.57507, 0.29523, 0.79669],
E [0.32508, 0.09019, 0.39364, ..., 0.68108, 0.04877, 0.81635],
E ...,
E [0.48486, 0.93278, 0.88529, ..., 0.04771, 0.74600, 0.38908],
E [0.51226, 0.94264, 0.62312, ..., 0.00973, 0.92761, 0.82385],
E [0.54863, 0.67255, 0.06350, ..., 0.66660, 0.75445, 0.55228]],
E
E [[0.46497, 0.02775, 0.21171, ..., 0.86879, 0.62003, 0.45060],
E [0.46920, 0.18641, 0.31915, ..., 0.95536, 0.36635, 0.25849],
E [0.07972, 0.50737, 0.89399, ..., 0.32854, 0.75889, 0.37678],
E ...,
E [0.97866, 0.95199, 0.84927, ..., 0.80780, 0.00904, 0.00848],
E [0.19424, 0.88453, 0.68146, ..., 0.71353, 0.36638, 0.91316],
E [0.88112, 0.75869, 0.18943, ..., 0.08316, 0.94820, 0.05317]]],
E
E
E [[[0.23870, 0.35561, 0.17639, ..., 0.11390, 0.43097, 0.35527],
E [0.52108, 0.94565, 0.55422, ..., 0.00238, 0.29352, 0.36860],
E [0.80079, 0.41645, 0.29173, ..., 0.25131, 0.43844, 0.42293],
E ...,
E [0.44431, 0.44570, 0.12421, ..., 0.77621, 0.52715, 0.00565],
E [0.22623, 0.33509, 0.82225, ..., 0.68865, 0.19042, 0.33843],
E [0.71139, 0.30818, 0.39749, ..., 0.29286, 0.93478, 0.60322]],
E
E [[0.96680, 0.87950, 0.06249, ..., 0.41358, 0.95622, 0.31968],
E [0.64389, 0.49734, 0.39635, ..., 0.74375, 0.17989, 0.62373],
E [0.30778, 0.32249, 0.37637, ..., 0.01617, 0.14207, 0.83473],
E ...,
E [0.24800, 0.38302, 0.69543, ..., 0.60396, 0.61377, 0.63255],
E [0.05496, 0.12636, 0.19282, ..., 0.58213, 0.28210, 0.59853],
E [0.27152, 0.86765, 0.59504, ..., 0.78986, 0.69613, 0.51850]]]])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
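All three failing concat ids (1, 3 and -2) show the familiar signature: the compiled output holds golden values in a shuffled order while the trailing values of the last rows line up, so the concat axis itself may well be correct and the problem upstream in layout. As a baseline, negative dims are plain aliases on rank-4 inputs, so eager mode must agree for 3/-1 and 1/-3:

import torch

a, b = torch.rand(2, 2, 32, 32), torch.rand(2, 2, 32, 32)
assert torch.equal(torch.cat((a, b), dim=3), torch.cat((a, b), dim=-1))
assert torch.equal(torch.cat((a, b), dim=1), torch.cat((a, b), dim=-3))
print(torch.cat((a, b), dim=1).shape)  # torch.Size([2, 4, 32, 32])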