Add test for deepseek_math (#1148) #463

GitHub Actions / TT-Forge-FE Tests failed Feb 5, 2025 in 0s

568 tests run, 24 passed, 79 skipped, 465 failed.

Annotations

Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_less[shape_x0-shape_y0]

RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 28, 28), shape_y = (1, 128, 28, 28)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_less(shape_x, shape_y):
        class Less(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.less(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Less()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:77: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c90430>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0...e-01, 6.38527e-01],
          [4.80435e-01, 5.63519e-01, 1.80081e-01,  ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0...e-01, 6.38527e-01],
          [4.80435e-01, 5.63519e-01, 1.80081e-01,  ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
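
All of the RuntimeError: Fatal error cases abort at the same point: the run_binary call inside CompiledModel.__call__, while executing the compiled forward program on device. A minimal standalone repro of that path, sketched under the assumption that it runs in the same ttforge toolchain environment as CI, is:

    import torch
    import torch.nn as nn

    import forge

    class Less(nn.Module):
        def forward(self, x, y):
            # Elementwise comparison; eager PyTorch returns a torch.bool tensor.
            return torch.less(x, y)

    # One of the failing parametrizations from the suite.
    inputs = [torch.rand(1, 128, 28, 28), torch.rand(1, 128, 28, 28)]

    compiled_model = forge.compile(Less(), sample_inputs=inputs)

    # This is the call that aborts in CI: __call__ forwards the inputs (plus
    # compiled constants and parameters) to run_binary, which runs the
    # forward program on device.
    outputs = compiled_model(*inputs)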

Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_less[shape_x2-shape_y2]

RuntimeError: Fatal error
Raw output
shape_x = (1, 256, 28, 28), shape_y = (1, 256, 28, 28)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_less(shape_x, shape_y):
        class Less(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.less(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Less()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:77: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c5bf10>
inputs = (tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0... 0.67269,  ..., 0.52853, 0.02791, 0.72723],
          [0.50627, 0.20671, 0.06682,  ..., 0.64299, 0.17580, 0.76558]]]]))
inputs_and_parameters = [tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0... 0.67269,  ..., 0.52853, 0.02791, 0.72723],
          [0.50627, 0.20671, 0.06682,  ..., 0.64299, 0.17580, 0.76558]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_less[shape_x4-shape_y4]

RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 56, 56), shape_y = (1, 128, 56, 56)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_less(shape_x, shape_y):
        class Less(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.less(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Less()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:77: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2dac730>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.24537,  ..., 0.20951, 0.17554, 0.22976],
          [0.29514, 0.97587, 0.57722,  ..., 0.83887, 0.36270, 0.54569]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.24537,  ..., 0.20951, 0.17554, 0.22976],
          [0.29514, 0.97587, 0.57722,  ..., 0.83887, 0.36270, 0.54569]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 77 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_less[shape_x6-shape_y6]

RuntimeError: Fatal error
Raw output
shape_x = (1, 512, 7, 7), shape_y = (1, 512, 7, 7)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_less(shape_x, shape_y):
        class Less(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.less(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Less()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:77: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2cea470>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.53460,  ..., 0.32252, 0.38427, 0.26611],
          [0.86218, 0.91274, 0.66447,  ..., 0.31686, 0.59966, 0.20105]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.53460,  ..., 0.32252, 0.38427, 0.26611],
          [0.86218, 0.91274, 0.66447,  ..., 0.31686, 0.59966, 0.20105]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_greater[shape_x1-shape_y1]

RuntimeError: Fatal error
Raw output
shape_x = (1, 64, 28, 28), shape_y = (1, 64, 28, 28)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_greater(shape_x, shape_y):
        class Greater(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.greater(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Greater()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c90340>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0... 0.33696,  ..., 0.07847, 0.66520, 0.74933],
          [0.38415, 0.30600, 0.06413,  ..., 0.89975, 0.98978, 0.35272]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0... 0.33696,  ..., 0.07847, 0.66520, 0.74933],
          [0.38415, 0.30600, 0.06413,  ..., 0.89975, 0.98978, 0.35272]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_greater[shape_x3-shape_y3]

RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 14, 14), shape_y = (1, 128, 14, 14)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_greater(shape_x, shape_y):
        class Greater(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.greater(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Greater()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c96e00>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
          [0.29389, 0.51852, 0.69767,  ..., 0... 0.86184,  ..., 0.05997, 0.37089, 0.73024],
          [0.12554, 0.51805, 0.53460,  ..., 0.31686, 0.59966, 0.20105]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
          [0.29389, 0.51852, 0.69767,  ..., 0... 0.86184,  ..., 0.05997, 0.37089, 0.73024],
          [0.12554, 0.51805, 0.53460,  ..., 0.31686, 0.59966, 0.20105]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_greater[shape_x5-shape_y5]

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[False, False, False,  ...,  True, False, False],
          [ True, False,  True,  ...,  True, False, False],
          [False, False,  True,  ..., False,  True,  True],
          ...,
          [ True, False,  True,  ...,  True,  True, False],
          [ True,  True, False,  ..., False, False, False],
          [ True, False,  True,  ...,  True,  True, False]],

         [[ True, False, False,  ...,  True,  True, False],
          [False,  True, False,  ...,  True, False, False],
          [ True,  True,  True,  ..., False, False, False],
          ...,
          [ True, False,  True,  ..., False, False, False],
          [False, False,  True,  ...,  True, False,  True],
          [ True, False, False,  ...,  True,  True, False]],

         [[False,  True,  True,  ..., False, False, False],
          [False,  True,  True,  ...,  True,  True, False],
          [ True,  True,  True,  ...,  True, False, False],
          ...,
          [False, False,  True,  ..., False,  True, False],
          [False, False,  True,  ...,  True, False, False],
          [ True,  True, False,  ..., False,  True, False]],

         ...,

         [[ True,  True,  True,  ...,  True, False,  True],
          [False,  True, False,  ..., False,  True, False],
          [ True,  True,  True,  ...,  True, False,  True],
          ...,
          [ True, False, False,  ..., False, False,  True],
          [ True, False, False,  ...,  True,  True, False],
          [False, False,  True,  ...,  True, False,  True]],

         [[ True, False,  True,  ...,  True,  True,  True],
          [ True, False,  True,  ..., False,  True,  True],
          [ True, False,  True,  ..., False, False,  True],
          ...,
          [False,  True,  True,  ...,  True, False, False],
          [ True,  True, False,  ...,  True, False, False],
          [ True,  True, False,  ...,  True,  True,  True]],

         [[False,  True,  True,  ...,  True,  True,  True],
          [ True, False,  True,  ...,  True,  True,  True],
          [False,  True,  True,  ...,  True, False,  True],
          ...,
          [False,  True, False,  ...,  True, False,  True],
          [ True,  True, False,  ..., False,  True,  True],
          [False,  True, False,  ..., False, False,  True]]]]), compiled_model=tensor([[[[0., 0., 0.,  ..., 1., 0., 1.],
          [0., 1., 0.,  ..., 0., 1., 0.],
          [0., 0., 1.,  ..., 0., 1., 1.],
          ...,
          [0., 1., 0.,  ..., 0., 0., 0.],
          [1., 0., 1.,  ..., 0., 0., 0.],
          [1., 1., 0.,  ..., 1., 1., 0.]],

         [[1., 0., 0.,  ..., 0., 0., 1.],
          [0., 1., 1.,  ..., 0., 1., 1.],
          [1., 1., 0.,  ..., 0., 1., 1.],
          ...,
          [0., 0., 0.,  ..., 0., 1., 0.],
          [0., 1., 1.,  ..., 0., 1., 1.],
          [0., 1., 0.,  ..., 1., 1., 0.]],

         [[0., 1., 1.,  ..., 1., 0., 0.],
          [0., 1., 0.,  ..., 1., 0., 1.],
          [1., 0., 1.,  ..., 0., 0., 1.],
          ...,
          [1., 0., 1.,  ..., 1., 0., 1.],
          [0., 1., 1.,  ..., 0., 1., 1.],
          [0., 0., 1.,  ..., 0., 1., 0.]],

         ...,

         [[1., 1., 1.,  ..., 0., 1., 0.],
          [1., 0., 1.,  ..., 1., 0., 0.],
          [1., 0., 1.,  ..., 1., 1., 1.],
          ...,
          [1., 1., 1.,  ..., 1., 0., 0.],
          [1., 1., 1.,  ..., 1., 0., 1.],
          [1., 1., 1.,  ..., 1., 0., 1.]],

         [[1., 0., 1.,  ..., 0., 0., 1.],
          [0., 0., 0.,  ..., 0., 1., 1.],
          [0., 1., 1.,  ..., 1., 1., 0.],
          ...,
          [0., 1., 0.,  ..., 0., 0., 1.],
          [1., 0., 1.,  ..., 0., 0., 0.],
          [1., 0., 0.,  ..., 1., 1., 1.]],

         [[0., 1., 1.,  ..., 0., 1., 1.],
          [1., 1., 0.,  ..., 1., 1., 1.],
          [1., 0., 0.,  ..., 0., 1., 0.],
          ...,
          [1., 0., 1.,  ..., 0., 1., 0.],
          [0., 1., 0.,  ..., 0., 0., 0.],
          [1., 1., 0.,  ..., 0., 0., 1.]]]])
Raw output
shape_x = (1, 32, 64, 64), shape_y = (1, 32, 64, 64)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_greater(shape_x, shape_y):
        class Greater(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.greater(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Greater()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb619fc4580>
fw_out = tensor([[[[False, False, False,  ...,  True, False, False],
          [ True, False,  True,  ...,  True, False, False]...       [ True,  True, False,  ..., False,  True,  True],
          [False,  True, False,  ..., False, False,  True]]]])
co_out = tensor([[[[0., 0., 0.,  ..., 1., 0., 1.],
          [0., 1., 0.,  ..., 0., 1., 0.],
          [0., 0., 1.,  ..., 0., 1... [1., 0., 1.,  ..., 0., 1., 0.],
          [0., 1., 0.,  ..., 0., 0., 0.],
          [1., 1., 0.,  ..., 0., 0., 1.]]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[False, False, False,  ...,  True, False, False],
E                     [ True, False,  True,  ...,  True, False, False],
E                     [False, False,  True,  ..., False,  True,  True],
E                     ...,
E                     [ True, False,  True,  ...,  True,  True, False],
E                     [ True,  True, False,  ..., False, False, False],
E                     [ True, False,  True,  ...,  True,  True, False]],
E           
E                    [[ True, False, False,  ...,  True,  True, False],
E                     [False,  True, False,  ...,  True, False, False],
E                     [ True,  True,  True,  ..., False, False, False],
E                     ...,
E                     [ True, False,  True,  ..., False, False, False],
E                     [False, False,  True,  ...,  True, False,  True],
E                     [ True, False, False,  ...,  True,  True, False]],
E           
E                    [[False,  True,  True,  ..., False, False, False],
E                     [False,  True,  True,  ...,  True,  True, False],
E                     [ True,  True,  True,  ...,  True, False, False],
E                     ...,
E                     [False, False,  True,  ..., False,  True, False],
E                     [False, False,  True,  ...,  True, False, False],
E                     [ True,  True, False,  ..., False,  True, False]],
E           
E                    ...,
E           
E                    [[ True,  True,  True,  ...,  True, False,  True],
E                     [False,  True, False,  ..., False,  True, False],
E                     [ True,  True,  True,  ...,  True, False,  True],
E                     ...,
E                     [ True, False, False,  ..., False, False,  True],
E                     [ True, False, False,  ...,  True,  True, False],
E                     [False, False,  True,  ...,  True, False,  True]],
E           
E                    [[ True, False,  True,  ...,  True,  True,  True],
E                     [ True, False,  True,  ..., False,  True,  True],
E                     [ True, False,  True,  ..., False, False,  True],
E                     ...,
E                     [False,  True,  True,  ...,  True, False, False],
E                     [ True,  True, False,  ...,  True, False, False],
E                     [ True,  True, False,  ...,  True,  True,  True]],
E           
E                    [[False,  True,  True,  ...,  True,  True,  True],
E                     [ True, False,  True,  ...,  True,  True,  True],
E                     [False,  True,  True,  ...,  True, False,  True],
E                     ...,
E                     [False,  True, False,  ...,  True, False,  True],
E                     [ True,  True, False,  ..., False,  True,  True],
E                     [False,  True, False,  ..., False, False,  True]]]]), compiled_model=tensor([[[[0., 0., 0.,  ..., 1., 0., 1.],
E                     [0., 1., 0.,  ..., 0., 1., 0.],
E                     [0., 0., 1.,  ..., 0., 1., 1.],
E                     ...,
E                     [0., 1., 0.,  ..., 0., 0., 0.],
E                     [1., 0., 1.,  ..., 0., 0., 0.],
E                     [1., 1., 0.,  ..., 1., 1., 0.]],
E           
E                    [[1., 0., 0.,  ..., 0., 0., 1.],
E                     [0., 1., 1.,  ..., 0., 1., 1.],
E                     [1., 1., 0.,  ..., 0., 1., 1.],
E                     ...,
E                     [0., 0., 0.,  ..., 0., 1., 0.],
E                     [0., 1., 1.,  ..., 0., 1., 1.],
E                     [0., 1., 0.,  ..., 1., 1., 0.]],
E           
E                    [[0., 1., 1.,  ..., 1., 0., 0.],
E                     [0., 1., 0.,  ..., 1., 0., 1.],
E                     [1., 0., 1.,  ..., 0., 0., 1.],
E                     ...,
E                     [1., 0., 1.,  ..., 1., 0., 1.],
E                     [0., 1., 1.,  ..., 0., 1., 1.],
E                     [0., 0., 1.,  ..., 0., 1., 0.]],
E           
E                    ...,
E           
E                    [[1., 1., 1.,  ..., 0., 1., 0.],
E                     [1., 0., 1.,  ..., 1., 0., 0.],
E                     [1., 0., 1.,  ..., 1., 1., 1.],
E                     ...,
E                     [1., 1., 1.,  ..., 1., 0., 0.],
E                     [1., 1., 1.,  ..., 1., 0., 1.],
E                     [1., 1., 1.,  ..., 1., 0., 1.]],
E           
E                    [[1., 0., 1.,  ..., 0., 0., 1.],
E                     [0., 0., 0.,  ..., 0., 1., 1.],
E                     [0., 1., 1.,  ..., 1., 1., 0.],
E                     ...,
E                     [0., 1., 0.,  ..., 0., 0., 1.],
E                     [1., 0., 1.,  ..., 0., 0., 0.],
E                     [1., 0., 0.,  ..., 1., 1., 1.]],
E           
E                    [[0., 1., 1.,  ..., 0., 1., 1.],
E                     [1., 1., 0.,  ..., 1., 1., 1.],
E                     [1., 0., 0.,  ..., 0., 1., 0.],
E                     ...,
E                     [1., 0., 1.,  ..., 0., 1., 0.],
E                     [0., 1., 0.,  ..., 0., 0., 0.],
E                     [1., 1., 0.,  ..., 0., 0., 1.]]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
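
The data-mismatch cases get past the runtime call but fail value checking: the dumps show the framework (golden) output as a torch.bool tensor while the compiled model returns float 0./1. values, and the visible truncated values also disagree in places, so the mismatch is not purely a dtype artifact (VerifyConfig(verify_dtype=False) already relaxes the dtype check). Below is a small triage sketch that separates the two effects; triage_mismatch and its stand-in tensors are hypothetical, not part of the suite:

    import torch

    def triage_mismatch(fw_out: torch.Tensor, co_out: torch.Tensor) -> None:
        """Report dtype differences separately from genuine value disagreements."""
        print(f"fw dtype: {fw_out.dtype}, co dtype: {co_out.dtype}")
        # Normalize both outputs to bool: nonzero floats (1.) become True.
        fw_bool = fw_out.to(torch.bool)
        co_bool = co_out.to(torch.bool)
        mismatched = (fw_bool != co_bool).sum().item()
        print(f"value disagreements after dtype normalization: {mismatched}/{fw_bool.numel()}")

    # Stand-in tensors shaped like the failing test_greater[shape_x5-shape_y5] case.
    fw = torch.rand(1, 32, 64, 64) > 0.5                      # bool golden output
    co = (torch.rand(1, 32, 64, 64) > 0.5).to(torch.float32)  # float 0./1. device output
    triage_mismatch(fw, co)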

Check failure on line 110 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_greater[shape_x7-shape_y7]

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[False,  True, False,  ..., False, False,  True],
          [False, False, False,  ...,  True, False,  True],
          [ True, False, False,  ..., False, False, False],
          ...,
          [ True,  True,  True,  ..., False,  True,  True],
          [False, False,  True,  ...,  True, False,  True],
          [ True, False,  True,  ...,  True, False,  True]],

         [[False, False, False,  ...,  True, False, False],
          [ True, False,  True,  ...,  True,  True,  True],
          [False, False,  True,  ..., False, False, False],
          ...,
          [ True,  True, False,  ..., False,  True, False],
          [ True, False,  True,  ..., False,  True, False],
          [ True, False, False,  ..., False,  True, False]],

         [[False,  True, False,  ..., False,  True,  True],
          [False, False, False,  ...,  True,  True, False],
          [ True, False,  True,  ...,  True, False, False],
          ...,
          [False, False,  True,  ..., False, False,  True],
          [ True,  True, False,  ..., False,  True, False],
          [False,  True, False,  ...,  True, False,  True]],

         ...,

         [[ True, False,  True,  ..., False,  True, False],
          [False, False,  True,  ...,  True, False, False],
          [False,  True, False,  ...,  True, False,  True],
          ...,
          [ True,  True,  True,  ..., False,  True,  True],
          [False, False,  True,  ...,  True, False,  True],
          [ True,  True, False,  ...,  True, False, False]],

         [[ True,  True, False,  ...,  True, False,  True],
          [ True, False,  True,  ..., False, False, False],
          [False, False, False,  ...,  True, False,  True],
          ...,
          [False, False,  True,  ..., False,  True,  True],
          [ True,  True, False,  ...,  True, False, False],
          [ True, False, False,  ...,  True,  True,  True]],

         [[False,  True,  True,  ...,  True,  True,  True],
          [ True,  True,  True,  ...,  True, False, False],
          [ True, False, False,  ..., False,  True, False],
          ...,
          [ True,  True, False,  ...,  True, False, False],
          [ True,  True,  True,  ..., False,  True,  True],
          [ True,  True,  True,  ...,  True, False, False]]]]), compiled_model=tensor([[[[0., 1., 0.,  ..., 0., 1., 1.],
          [1., 0., 0.,  ..., 0., 1., 1.],
          [0., 0., 1.,  ..., 1., 0., 1.],
          ...,
          [0., 0., 0.,  ..., 1., 0., 1.],
          [0., 1., 1.,  ..., 0., 1., 1.],
          [0., 1., 0.,  ..., 1., 0., 1.]],

         [[0., 0., 0.,  ..., 1., 1., 0.],
          [0., 0., 1.,  ..., 1., 1., 0.],
          [0., 0., 1.,  ..., 0., 1., 1.],
          ...,
          [0., 0., 1.,  ..., 1., 0., 1.],
          [1., 1., 0.,  ..., 0., 1., 0.],
          [0., 0., 1.,  ..., 0., 1., 0.]],

         [[0., 1., 0.,  ..., 0., 1., 1.],
          [1., 0., 1.,  ..., 0., 0., 1.],
          [1., 1., 0.,  ..., 0., 0., 1.],
          ...,
          [1., 0., 1.,  ..., 1., 1., 1.],
          [0., 0., 0.,  ..., 0., 0., 1.],
          [1., 0., 1.,  ..., 1., 0., 1.]],

         ...,

         [[1., 0., 1.,  ..., 0., 0., 1.],
          [0., 1., 0.,  ..., 0., 0., 0.],
          [1., 1., 1.,  ..., 0., 1., 0.],
          ...,
          [0., 0., 1.,  ..., 0., 1., 0.],
          [1., 1., 1.,  ..., 0., 1., 1.],
          [1., 1., 0.,  ..., 1., 0., 0.]],

         [[1., 1., 0.,  ..., 1., 1., 1.],
          [0., 0., 0.,  ..., 0., 0., 0.],
          [0., 1., 1.,  ..., 0., 0., 0.],
          ...,
          [1., 0., 0.,  ..., 1., 1., 1.],
          [0., 0., 1.,  ..., 0., 1., 1.],
          [0., 0., 1.,  ..., 1., 1., 1.]],

         [[0., 1., 1.,  ..., 0., 0., 0.],
          [1., 0., 0.,  ..., 1., 1., 0.],
          [1., 0., 1.,  ..., 1., 1., 1.],
          ...,
          [0., 1., 1.,  ..., 1., 1., 1.],
          [1., 1., 0.,  ..., 1., 0., 0.],
          [0., 1., 1.,  ..., 1., 0., 0.]]]])
Raw output
shape_x = (1, 32, 32, 32), shape_y = (1, 32, 32, 32)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_greater(shape_x, shape_y):
        class Greater(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.greater(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = Greater()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:110: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb619fc4580>
fw_out = tensor([[[[False,  True, False,  ..., False, False,  True],
          [False, False, False,  ...,  True, False,  True]...       [ True,  True,  True,  ..., False,  True,  True],
          [ True,  True,  True,  ...,  True, False, False]]]])
co_out = tensor([[[[0., 1., 0.,  ..., 0., 1., 1.],
          [1., 0., 0.,  ..., 0., 1., 1.],
          [0., 0., 1.,  ..., 1., 0... [0., 1., 1.,  ..., 1., 1., 1.],
          [1., 1., 0.,  ..., 1., 0., 0.],
          [0., 1., 1.,  ..., 1., 0., 0.]]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[[False,  True, False,  ..., False, False,  True],
E                     [False, False, False,  ...,  True, False,  True],
E                     [ True, False, False,  ..., False, False, False],
E                     ...,
E                     [ True,  True,  True,  ..., False,  True,  True],
E                     [False, False,  True,  ...,  True, False,  True],
E                     [ True, False,  True,  ...,  True, False,  True]],
E           
E                    [[False, False, False,  ...,  True, False, False],
E                     [ True, False,  True,  ...,  True,  True,  True],
E                     [False, False,  True,  ..., False, False, False],
E                     ...,
E                     [ True,  True, False,  ..., False,  True, False],
E                     [ True, False,  True,  ..., False,  True, False],
E                     [ True, False, False,  ..., False,  True, False]],
E           
E                    [[False,  True, False,  ..., False,  True,  True],
E                     [False, False, False,  ...,  True,  True, False],
E                     [ True, False,  True,  ...,  True, False, False],
E                     ...,
E                     [False, False,  True,  ..., False, False,  True],
E                     [ True,  True, False,  ..., False,  True, False],
E                     [False,  True, False,  ...,  True, False,  True]],
E           
E                    ...,
E           
E                    [[ True, False,  True,  ..., False,  True, False],
E                     [False, False,  True,  ...,  True, False, False],
E                     [False,  True, False,  ...,  True, False,  True],
E                     ...,
E                     [ True,  True,  True,  ..., False,  True,  True],
E                     [False, False,  True,  ...,  True, False,  True],
E                     [ True,  True, False,  ...,  True, False, False]],
E           
E                    [[ True,  True, False,  ...,  True, False,  True],
E                     [ True, False,  True,  ..., False, False, False],
E                     [False, False, False,  ...,  True, False,  True],
E                     ...,
E                     [False, False,  True,  ..., False,  True,  True],
E                     [ True,  True, False,  ...,  True, False, False],
E                     [ True, False, False,  ...,  True,  True,  True]],
E           
E                    [[False,  True,  True,  ...,  True,  True,  True],
E                     [ True,  True,  True,  ...,  True, False, False],
E                     [ True, False, False,  ..., False,  True, False],
E                     ...,
E                     [ True,  True, False,  ...,  True, False, False],
E                     [ True,  True,  True,  ..., False,  True,  True],
E                     [ True,  True,  True,  ...,  True, False, False]]]]), compiled_model=tensor([[[[0., 1., 0.,  ..., 0., 1., 1.],
E                     [1., 0., 0.,  ..., 0., 1., 1.],
E                     [0., 0., 1.,  ..., 1., 0., 1.],
E                     ...,
E                     [0., 0., 0.,  ..., 1., 0., 1.],
E                     [0., 1., 1.,  ..., 0., 1., 1.],
E                     [0., 1., 0.,  ..., 1., 0., 1.]],
E           
E                    [[0., 0., 0.,  ..., 1., 1., 0.],
E                     [0., 0., 1.,  ..., 1., 1., 0.],
E                     [0., 0., 1.,  ..., 0., 1., 1.],
E                     ...,
E                     [0., 0., 1.,  ..., 1., 0., 1.],
E                     [1., 1., 0.,  ..., 0., 1., 0.],
E                     [0., 0., 1.,  ..., 0., 1., 0.]],
E           
E                    [[0., 1., 0.,  ..., 0., 1., 1.],
E                     [1., 0., 1.,  ..., 0., 0., 1.],
E                     [1., 1., 0.,  ..., 0., 0., 1.],
E                     ...,
E                     [1., 0., 1.,  ..., 1., 1., 1.],
E                     [0., 0., 0.,  ..., 0., 0., 1.],
E                     [1., 0., 1.,  ..., 1., 0., 1.]],
E           
E                    ...,
E           
E                    [[1., 0., 1.,  ..., 0., 0., 1.],
E                     [0., 1., 0.,  ..., 0., 0., 0.],
E                     [1., 1., 1.,  ..., 0., 1., 0.],
E                     ...,
E                     [0., 0., 1.,  ..., 0., 1., 0.],
E                     [1., 1., 1.,  ..., 0., 1., 1.],
E                     [1., 1., 0.,  ..., 1., 0., 0.]],
E           
E                    [[1., 1., 0.,  ..., 1., 1., 1.],
E                     [0., 0., 0.,  ..., 0., 0., 0.],
E                     [0., 1., 1.,  ..., 0., 0., 0.],
E                     ...,
E                     [1., 0., 0.,  ..., 1., 1., 1.],
E                     [0., 0., 1.,  ..., 0., 1., 1.],
E                     [0., 0., 1.,  ..., 1., 1., 1.]],
E           
E                    [[0., 1., 1.,  ..., 0., 0., 0.],
E                     [1., 0., 0.,  ..., 1., 1., 0.],
E                     [1., 0., 1.,  ..., 1., 1., 1.],
E                     ...,
E                     [0., 1., 1.,  ..., 1., 1., 1.],
E                     [1., 1., 0.,  ..., 1., 0., 0.],
E                     [0., 1., 1.,  ..., 1., 0., 0.]]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError

Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_not_equal[shape_x0-shape_y0]

RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 28, 28), shape_y = (1, 128, 28, 28)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_not_equal(shape_x, shape_y):
        class NotEqual(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.ne(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = NotEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:143: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c15060>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0...e-01, 6.38527e-01],
          [4.80435e-01, 5.63519e-01, 1.80081e-01,  ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0...e-01, 6.38527e-01],
          [4.80435e-01, 5.63519e-01, 1.80081e-01,  ..., 6.29593e-01, 1.39392e-01, 3.02127e-01]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_not_equal[shape_x2-shape_y2]

RuntimeError: Fatal error
Raw output
shape_x = (1, 256, 28, 28), shape_y = (1, 256, 28, 28)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_not_equal(shape_x, shape_y):
        class NotEqual(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.ne(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = NotEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:143: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b094c8b0>
inputs = (tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0... 0.67269,  ..., 0.52853, 0.02791, 0.72723],
          [0.50627, 0.20671, 0.06682,  ..., 0.64299, 0.17580, 0.76558]]]]))
inputs_and_parameters = [tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0... 0.67269,  ..., 0.52853, 0.02791, 0.72723],
          [0.50627, 0.20671, 0.06682,  ..., 0.64299, 0.17580, 0.76558]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
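
The not_equal cases, like the less and equal cases, pass VerifyConfig(verify_dtype=False). On the golden side these comparisons produce a torch.bool tensor, so the dtype check is presumably relaxed because the device output comes back in a different dtype; that reading is an inference from the config, not something the log states. A framework-free look at the golden semantics:

    import torch

    x = torch.rand(1, 256, 28, 28)
    y = torch.rand(1, 256, 28, 28)

    golden = torch.ne(x, y)
    print(golden.dtype)  # torch.bool -- hence verify_dtype=False above
    # Two independent uniform samples essentially never tie, so for these
    # inputs the golden tensor is all True (ties are possible but vanishingly rare).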

Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_not_equal[shape_x4-shape_y4]

RuntimeError: Fatal error
Raw output
shape_x = (1, 128, 56, 56), shape_y = (1, 128, 56, 56)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_not_equal(shape_x, shape_y):
        class NotEqual(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.ne(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = NotEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:143: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2cbace0>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.24537,  ..., 0.20951, 0.17554, 0.22976],
          [0.29514, 0.97587, 0.57722,  ..., 0.83887, 0.36270, 0.54569]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.24537,  ..., 0.20951, 0.17554, 0.22976],
          [0.29514, 0.97587, 0.57722,  ..., 0.83887, 0.36270, 0.54569]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 143 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_not_equal[shape_x6-shape_y6]

RuntimeError: Fatal error
Raw output
shape_x = (1, 512, 7, 7), shape_y = (1, 512, 7, 7)

    @pytest.mark.parametrize(
        "shape_x, shape_y",
        [
            ((1, 128, 28, 28), (1, 128, 28, 28)),
            ((1, 64, 28, 28), (1, 64, 28, 28)),
            ((1, 256, 28, 28), (1, 256, 28, 28)),
            ((1, 128, 14, 14), (1, 128, 14, 14)),
            ((1, 128, 56, 56), (1, 128, 56, 56)),
            ((1, 32, 64, 64), (1, 32, 64, 64)),
            ((1, 512, 7, 7), (1, 512, 7, 7)),
            ((1, 32, 32, 32), (1, 32, 32, 32)),
        ],
    )
    @pytest.mark.push
    def test_not_equal(shape_x, shape_y):
        class NotEqual(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.ne(x, y)
    
        x = torch.rand(shape_x)
        y = torch.rand(shape_y)
    
        inputs = [x, y]
    
        framework_model = NotEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:143: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2dae860>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.53460,  ..., 0.32252, 0.38427, 0.26611],
          [0.86218, 0.91274, 0.66447,  ..., 0.31686, 0.59966, 0.20105]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.53460,  ..., 0.32252, 0.38427, 0.26611],
          [0.86218, 0.91274, 0.66447,  ..., 0.31686, 0.59966, 0.20105]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
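
Every traceback also carries the NOTE about gradient-bearing parameters and the assert param is our_tensor check. The invariant being asserted is object identity, not value equality: the torch optimizer and the device runtime must hold the same tensor object so that either side's in-place update is seen by the other. A pure-PyTorch illustration of why a copy would not do:

    import torch

    param = torch.nn.Parameter(torch.zeros(2, 2))
    shared = param          # same object -- what the assert above demands
    copied = param.clone()  # a value-equal copy, which would silently go stale

    with torch.no_grad():
        param.add_(1.0)     # e.g. an optimizer step made by one side

    assert shared is param                   # identity still holds
    assert torch.equal(shared, param)        # and the update is visible
    assert not torch.equal(copied, param)    # the clone missed the update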

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape0]

RuntimeError: Fatal error
Raw output
shape = (1, 128, 28, 28)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2b00b50>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0... 0.67392,  ..., 0.15695, 1.33041, 1.49866],
          [0.76830, 0.61201, 0.12826,  ..., 1.79951, 1.97955, 0.70544]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
          [0.18523, 0.37342, 0.30510,  ..., 0... 0.67392,  ..., 0.15695, 1.33041, 1.49866],
          [0.76830, 0.61201, 0.12826,  ..., 1.79951, 1.97955, 0.70544]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
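
The test_equal inputs are built as y = x * 2.0, so torch.eq(x, y) holds exactly where x == 0 (x == 2x iff x == 0). Since torch.rand samples from [0, 1), zeros are possible but vanishingly rare, making the golden output effectively an all-False tensor; the fatal error fires before the device side ever produces one. A one-liner confirming the construction:

    import torch

    x = torch.rand(1, 128, 28, 28)
    print(torch.eq(x, x * 2.0).any())  # almost always tensor(False)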

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape2]

RuntimeError: Fatal error
Raw output
shape = (1, 256, 28, 28)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b0902d70>
inputs = (tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0...e+00, 1.27705e+00],
          [9.60871e-01, 1.12704e+00, 3.60162e-01,  ..., 1.25919e+00, 2.78783e-01, 6.04253e-01]]]]))
inputs_and_parameters = [tensor([[[[4.96257e-01, 7.68222e-01, 8.84774e-02,  ..., 5.52907e-01, 9.52738e-01, 3.61648e-02],
          [1.85231e-0...e+00, 1.27705e+00],
          [9.60871e-01, 1.12704e+00, 3.60162e-01,  ..., 1.25919e+00, 2.78783e-01, 6.04253e-01]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape4]

RuntimeError: Fatal error
Raw output
shape = (1, 128, 56, 56)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c939a0>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.51179,  ..., 1.10265, 1.07445, 1.95150],
          [0.46215, 0.12042, 1.34537,  ..., 1.28598, 0.35160, 1.53116]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
          [0.59317, 0.11235, 0.15346,  ..., 0... 0.51179,  ..., 1.10265, 1.07445, 1.95150],
          [0.46215, 0.12042, 1.34537,  ..., 1.28598, 0.35160, 1.53116]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape6]

RuntimeError: Fatal error
Raw output
shape = (1, 512, 7, 7)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2cb8670>
inputs = (tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.23916,  ..., 0.06386, 1.28559, 1.67222],
          [1.13158, 1.59839, 0.17406,  ..., 0.48676, 1.18056, 1.04700]]]]))
inputs_and_parameters = [tensor([[[[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
          [0.89644, 0.45563, 0.63231,  ..., 0... 0.23916,  ..., 0.06386, 1.28559, 1.67222],
          [1.13158, 1.59839, 0.17406,  ..., 0.48676, 1.18056, 1.04700]]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape8]

RuntimeError: Fatal error
Raw output
shape = (128, 28, 28)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c5bfa0>
inputs = (tensor([[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
         [0.18523, 0.37342, 0.30510,  ..., 0.5...3, 0.67392,  ..., 0.15695, 1.33041, 1.49866],
         [0.76830, 0.61201, 0.12826,  ..., 1.79951, 1.97955, 0.70544]]]))
inputs_and_parameters = [tensor([[[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
         [0.18523, 0.37342, 0.30510,  ..., 0.5...3, 0.67392,  ..., 0.15695, 1.33041, 1.49866],
         [0.76830, 0.61201, 0.12826,  ..., 1.79951, 1.97955, 0.70544]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape11]

RuntimeError: Fatal error
Raw output
shape = (128, 14, 14)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c96bc0>
inputs = (tensor([[[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
         [0.29389, 0.51852, 0.69767,  ..., 0.5...4, 0.18371,  ..., 0.59819, 0.52339, 0.93409],
         [1.53317, 1.88780, 0.23916,  ..., 0.48676, 1.18056, 1.04700]]]))
inputs_and_parameters = [tensor([[[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
         [0.29389, 0.51852, 0.69767,  ..., 0.5...4, 0.18371,  ..., 0.59819, 0.52339, 0.93409],
         [1.53317, 1.88780, 0.23916,  ..., 0.48676, 1.18056, 1.04700]]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape17]

RuntimeError: Fatal error
Raw output
shape = (64, 28)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2beb700>
inputs = (tensor([[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
        [0.18523, 0.37342, 0.30510,  ..., 0.567...215, 1.84882,  ..., 1.15385, 0.43569, 0.68789],
        [1.78722, 0.50906, 0.93987,  ..., 1.55895, 1.06794, 1.79717]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848,  ..., 0.55291, 0.95274, 0.03616],
        [0.18523, 0.37342, 0.30510,  ..., 0.567...215, 1.84882,  ..., 1.15385, 0.43569, 0.68789],
        [1.78722, 0.50906, 0.93987,  ..., 1.55895, 1.06794, 1.79717]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape19]

RuntimeError: Fatal error
Raw output
shape = (128, 14)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c59840>
inputs = (tensor([[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
        [0.29389, 0.51852, 0.69767,  ..., 0.552...906, 0.93987,  ..., 1.84339, 0.45132, 0.68730],
        [1.36408, 1.79865, 1.95659,  ..., 1.55895, 1.06794, 1.79717]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848,  ..., 0.40172, 0.02233, 0.16886],
        [0.29389, 0.51852, 0.69767,  ..., 0.552...906, 0.93987,  ..., 1.84339, 0.45132, 0.68730],
        [1.36408, 1.79865, 1.95659,  ..., 1.55895, 1.06794, 1.79717]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape20]

RuntimeError: Fatal error
Raw output
shape = (128, 56)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2c90f40>
inputs = (tensor([[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
        [0.59317, 0.11235, 0.15346,  ..., 0.575...018, 1.64450,  ..., 1.10540, 1.42029, 0.52418],
        [1.53309, 1.29671, 1.77202,  ..., 0.58573, 1.86955, 1.20645]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848,  ..., 0.56755, 0.83524, 0.20560],
        [0.59317, 0.11235, 0.15346,  ..., 0.575...018, 1.64450,  ..., 1.10540, 1.42029, 0.52418],
        [1.53309, 1.29671, 1.77202,  ..., 0.58573, 1.86955, 1.20645]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 192 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_equal[shape22]

RuntimeError: Fatal error
Raw output
shape = (512, 7)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 128, 28, 28),
            (1, 64, 28, 28),
            (1, 256, 28, 28),
            (1, 128, 14, 14),
            (1, 128, 56, 56),
            (1, 32, 64, 64),
            (1, 512, 7, 7),
            (1, 32, 32, 32),
            (128, 28, 28),
            (64, 28, 28),
            (256, 28, 28),
            (128, 14, 14),
            (128, 56, 56),
            (32, 64, 64),
            (512, 7, 7),
            (32, 32, 32),
            (128, 28),
            (64, 28),
            (256, 28),
            (128, 14),
            (128, 56),
            (32, 64),
            (512, 7),
            (32, 32),
        ],
    )
    @pytest.mark.push
    def test_equal(shape):
        class Equal(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, x, y):
                return torch.eq(x, y)
    
        x = torch.rand(shape)
        y = x * 2.0
    
        inputs = [x, y]
    
        framework_model = Equal()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:192: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2a50ee0>
inputs = (tensor([[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
        [0.89644, 0.45563, 0.63231,  ..., 0.401...054, 0.72265,  ..., 0.27104, 0.14659, 1.98740],
        [0.13614, 1.85093, 1.26928,  ..., 1.87098, 0.72776, 0.58766]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848,  ..., 0.30742, 0.63408, 0.49009],
        [0.89644, 0.45563, 0.63231,  ..., 0.401...054, 0.72265,  ..., 0.27104, 0.14659, 1.98740],
        [0.13614, 1.85093, 1.26928,  ..., 1.87098, 0.72776, 0.58766]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError

Check failure on line 209 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_add

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([...]), compiled_model=tensor([...]) (full values in the raw output below)
Raw output
@pytest.mark.push
    def test_add():
        class Add(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, a, b):
                return a + b
    
        inputs = [torch.rand(2, 32, 32), torch.rand(2, 32, 32)]
    
        framework_model = Add()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model)

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:209: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb619fc4580>
fw_out = tensor([[[0.73496, 1.12383, 0.26487,  ..., 0.61673, 0.91219, 1.20019],
         [0.48113, 0.43513, 0.23373,  ..., 1.51...09, 1.67520,  ..., 1.36567, 1.63941, 1.35224],
         [0.85810, 1.59082, 0.37622,  ..., 1.30486, 1.44561, 0.35451]]])
co_out = tensor([[[0.73496, 1.12383, 0.26487,  ..., 0.35614, 1.24644, 1.14843],
         [1.29557, 1.38254, 1.07331,  ..., 0.57...04, 0.11506,  ..., 0.36217, 1.16124, 0.99175],
         [1.07781, 0.75897, 1.22801,  ..., 1.30486, 1.44561, 0.35451]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[0.73496, 1.12383, 0.26487,  ..., 0.61673, 0.91219, 1.20019],
E                    [0.48113, 0.43513, 0.23373,  ..., 1.51522, 0.79363, 1.01430],
E                    [1.29557, 1.38254, 1.07331,  ..., 1.66554, 0.07209, 0.33904],
E                    ...,
E                    [1.72863, 1.08533, 1.57207,  ..., 0.28559, 1.11634, 1.19057],
E                    [1.12506, 0.98804, 1.76224,  ..., 0.88192, 1.23629, 1.08956],
E                    [0.90576, 0.71437, 1.37417,  ..., 0.84095, 0.90509, 1.49623]],
E           
E                   [[1.43176, 0.90725, 0.27420,  ..., 1.23006, 0.63938, 1.00249],
E                    [1.82137, 1.03566, 1.02211,  ..., 0.86952, 1.31769, 1.43452],
E                    [1.11309, 0.68375, 0.71550,  ..., 0.87892, 0.68875, 0.59087],
E                    ...,
E                    [1.68523, 0.71244, 1.18100,  ..., 0.36217, 1.16124, 0.99175],
E                    [1.00374, 0.47009, 1.67520,  ..., 1.36567, 1.63941, 1.35224],
E                    [0.85810, 1.59082, 0.37622,  ..., 1.30486, 1.44561, 0.35451]]]), compiled_model=tensor([[[0.73496, 1.12383, 0.26487,  ..., 0.35614, 1.24644, 1.14843],
E                    [1.29557, 1.38254, 1.07331,  ..., 0.57745, 0.58875, 1.16529],
E                    [1.12588, 0.50664, 0.68537,  ..., 0.93239, 0.48721, 1.23928],
E                    ...,
E                    [1.03377, 0.77774, 1.39789,  ..., 1.55659, 0.84347, 1.39830],
E                    [1.40545, 0.85635, 1.53476,  ..., 0.28559, 1.11634, 1.19057],
E                    [1.37749, 1.17447, 0.92949,  ..., 0.84095, 0.90509, 1.49623]],
E           
E                   [[1.43176, 0.90725, 0.27420,  ..., 1.28238, 1.57625, 0.77028],
E                    [1.11309, 0.68375, 0.71550,  ..., 1.69911, 0.54624, 0.88222],
E                    [0.38749, 0.82986, 1.27037,  ..., 0.34471, 0.90097, 1.21151],
E                    ...,
E                    [0.37735, 0.92445, 1.47703,  ..., 1.51981, 1.26251, 0.97893],
E                    [0.78997, 1.28204, 0.11506,  ..., 0.36217, 1.16124, 0.99175],
E                    [1.07781, 0.75897, 1.22801,  ..., 1.30486, 1.44561, 0.35451]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
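
The ValueError here is raised by AutomaticValueChecker.check when compare_with_golden rejects the pair, using the checker's pcc, rtol, atol, and dissimilarity_threshold settings. The actual comparison logic is not shown in this log; one plausible semantics (an assumption, not the Forge implementation) is "elementwise close, or Pearson correlation above a threshold":

    import torch

    def pcc(a: torch.Tensor, b: torch.Tensor) -> float:
        # Pearson correlation coefficient over the flattened tensors.
        a, b = a.flatten().double(), b.flatten().double()
        a, b = a - a.mean(), b - b.mean()
        denom = a.norm() * b.norm()
        return float(a.dot(b) / denom) if denom > 0 else 1.0

    def passes_golden_sketch(fw_out, co_out, min_pcc=0.99, rtol=1e-2, atol=1e-2):
        # Sketch of a golden check: exact-ish match or strong overall correlation.
        return torch.allclose(fw_out, co_out, rtol=rtol, atol=atol) or pcc(fw_out, co_out) >= min_pcc

For the outputs printed above, any such check would fail: the leading rows agree exactly while later rows differ entirely, which drives the overall correlation well below any reasonable threshold.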

Check failure on line 227 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_greater_equal[dims1]

RuntimeError: Fatal error
Raw output
dims = (6, 33)

    @pytest.mark.parametrize("dims", [(1, 32, 64), (6, 33), (4, 16, 17)])
    @pytest.mark.push
    def test_greater_equal(dims):
        class GreaterEqual(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, a, b):
                return torch.greater_equal(a, b)
    
        inputs = [torch.rand(dims), torch.rand(dims)]
    
        framework_model = GreaterEqual()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model, VerifyConfig(verify_dtype=False))

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:227: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fb5b2da72e0>
inputs = (tensor([[0.49626, 0.76822, 0.08848, 0.13203, 0.30742, 0.63408, 0.49009, 0.89644, 0.45563, 0.63231, 0.34889, 0.40172, ...0.94547, 0.66541, 0.99984, 0.75933, 0.81082, 0.32500, 0.73991, 0.55745, 0.38059, 0.21815, 0.21944, 0.11526, 0.83567]]))
inputs_and_parameters = [tensor([[0.49626, 0.76822, 0.08848, 0.13203, 0.30742, 0.63408, 0.49009, 0.89644, 0.45563, 0.63231, 0.34889, 0.40172, ...0.94547, 0.66541, 0.99984, 0.75933, 0.81082, 0.32500, 0.73991, 0.55745, 0.38059, 0.21815, 0.21944, 0.11526, 0.83567]])]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Fatal error

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
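
Unlike the data mismatches, this failure (and test_less above) is a RuntimeError: Fatal error raised inside run_binary, i.e. the compiled program aborts before producing any output that could be compared. Both fatal cases shown here are comparison ops on shapes whose trailing dimensions are not multiples of 32 ((6, 33) here, 28x28 for test_less), which may or may not be the trigger. The eager reference behaviour is well-defined, as a standalone snippet (plain PyTorch, no Forge involved) confirms:

    import torch

    # The failing parametrization, run eagerly.
    dims = (6, 33)
    a, b = torch.rand(dims), torch.rand(dims)
    out = torch.greater_equal(a, b)
    assert out.shape == dims and out.dtype == torch.bool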

Check failure on line 268 in forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py

test_eltwise_binary.test_multiply[shape0]

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([...]), compiled_model=tensor([...]) (full values in the raw output below)
Raw output
shape = (1, 32, 32)

    @pytest.mark.parametrize(
        "shape",
        [
            (1, 32, 32),
            (12, 8640),
        ],
    )
    @pytest.mark.push
    def test_multiply(shape):
        class Multiply(nn.Module):
            def __init__(self):
                super().__init__()
    
            def forward(self, a, b):
                return a * b
    
        inputs = [torch.rand(shape), torch.rand(shape)]
    
        framework_model = Multiply()
        compiled_model = forge.compile(framework_model, sample_inputs=inputs)
    
>       verify(inputs, framework_model, compiled_model)

forge/test/mlir/operators/eltwise_binary/test_eltwise_binary.py:268: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7fb619fc4580>
fw_out = tensor([[[2.30743e-01, 2.13187e-02, 1.87313e-02,  ..., 3.22881e-01, 1.84691e-02, 4.23863e-01],
         [1.60184e-01, ...224e-01, 4.98801e-01],
         [2.67098e-01, 2.77467e-01, 1.18235e-01,  ..., 1.10228e-01, 1.73405e-01, 2.09750e-01]]])
co_out = tensor([[[0.23074, 0.02132, 0.01873,  ..., 0.21045, 0.50561, 0.35740],
         [0.36339, 0.08144, 0.16567,  ..., 0.54...21, 0.06422,  ..., 0.04679, 0.42937, 0.32117],
         [0.58724, 0.55476, 0.00146,  ..., 0.11023, 0.17341, 0.20975]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[2.30743e-01, 2.13187e-02, 1.87313e-02,  ..., 3.22881e-01, 1.84691e-02, 4.23863e-01],
E                    [1.60184e-01, 1.87168e-01, 1.38811e-01,  ..., 5.98106e-01, 1.46211e-01, 2.97836e-01],
E                    [3.63389e-01, 8.14409e-02, 1.65667e-01,  ..., 9.60350e-02, 3.18001e-04, 5.39877e-02],
E                    ...,
E                    [8.22859e-01, 2.26655e-01, 3.01797e-01,  ..., 4.67895e-02, 4.29371e-01, 3.21169e-01],
E                    [1.09746e-01, 2.58656e-01, 7.23061e-01,  ..., 5.23428e-01, 4.61224e-01, 4.98801e-01],
E                    [2.67098e-01, 2.77467e-01, 1.18235e-01,  ..., 1.10228e-01, 1.73405e-01, 2.09750e-01]]]), compiled_model=tensor([[[0.23074, 0.02132, 0.01873,  ..., 0.21045, 0.50561, 0.35740],
E                    [0.36339, 0.08144, 0.16567,  ..., 0.54940, 0.10816, 0.20594],
E                    [0.02591, 0.04576, 0.35191,  ..., 0.22376, 0.03701, 0.30759],
E                    ...,
E                    [0.17571, 0.39912, 0.33128,  ..., 0.54841, 0.19968, 0.64131],
E                    [0.41272, 0.44021, 0.06422,  ..., 0.04679, 0.42937, 0.32117],
E                    [0.58724, 0.55476, 0.00146,  ..., 0.11023, 0.17341, 0.20975]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
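
One pattern worth noting across both data-mismatch failures: the compiled output's rows appear to be the framework output's rows in a different order (for example, framework row 3 above, 3.63389e-01, 8.14409e-02, 1.65667e-01, ..., reappears as compiled row 2, 0.36339, 0.08144, 0.16567, ...). A hypothetical diagnostic, not part of this report, that separates a row shuffle from a genuine numeric error:

    import torch

    def is_row_permutation(fw: torch.Tensor, co: torch.Tensor, atol: float = 1e-6) -> bool:
        # Greedily match each compiled-output row to an unused framework row.
        fw2 = fw.reshape(-1, fw.shape[-1])
        co2 = co.reshape(-1, co.shape[-1])
        if fw2.shape != co2.shape:
            return False
        used = torch.zeros(fw2.shape[0], dtype=torch.bool)
        for row in co2:
            close = ((fw2 - row).abs().amax(dim=1) <= atol) & ~used
            idx = close.nonzero(as_tuple=False)
            if idx.numel() == 0:
                return False
            used[idx[0, 0]] = True
        return True

A True result for fw_out/co_out would point at row layout or data movement in the compiled path rather than at the elementwise arithmetic itself.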