Add test for deepseek_math (#1148) #463

GitHub Actions / TT-Forge-FE Tests failed Feb 6, 2025 in 0s

82 tests run, 7 passed, 63 skipped, 12 failed.

Annotations

Check failure on line 46 in forge/test/models/pytorch/audio/stereo/test_stereo.py

github-actions / TT-Forge-FE Tests

test_stereo.test_stereo[facebook/musicgen-small]

RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
info:
Trying to access element outside of dimensions: 3
backtrace:
 --- tt::graphlib::Shape::operator[](int)
 --- tt::passes::commute_through_concat(tt::graphlib::Graph*, tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Node*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, bool*, std::pair<int, int>*, tt::graphlib::OpType*, bool)
 --- tt::passes::can_commute_past_op(tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Graph*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, tt::graphlib::Node*)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x18fc4f) [0x7fa8a9fe8c4f]
 --- tt::passes::erase_inverse_ops(tt::graphlib::Graph*)
 --- tt::run_optimization_graph_passes(tt::graphlib::Graph*)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xfda39) [0x7fa8a9f56a39]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0cf5) [0x7fa8a9f39cf5]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x559f980b6b32]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x559f980a6a97]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x559f980c5172]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x559f980a197f]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa85ddbf5b0>
variant = 'facebook/musicgen-small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_stereo(record_forge_property, variant):
        if variant != "facebook/musicgen-small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, processor = load_model(variant)
    
        input_ids, attn_mask, decoder_input_ids = load_inputs(framework_model, processor)
        inputs = [input_ids, attn_mask, decoder_input_ids]
    
        # Issue: https://github.com/tenstorrent/tt-forge-fe/issues/615
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/audio/stereo/test_stereo.py:46: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_stereo_facebook_musicgen_small_music_generation_hf], graph_name='pt_stereo_facebook_...in_recompile=False, recompile_count=0, target_cycles_offset=0, forge_module=None, compiled_binary=None, attach_to=None)

    def run_optimization_pass(context: CompileContext) -> CompileDepth:
        """
        Runs optimization passes.
    
        Parameters
        ----------
        context: CompileContext
            Compile context
    
        Returns
        -------
        CompileDepth - next compile stage
        """
        compiler_cfg = context.compiler_cfg
        graph_name = context.graph_name
        graph, intermediate_tensors = context.graph, context.intermediate_tensors
    
>       run_optimization_graph_passes(graph)
E       RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
E       info:
E       Trying to access element outside of dimensions: 3
E       backtrace:
E        --- tt::graphlib::Shape::operator[](int)
E        --- tt::passes::commute_through_concat(tt::graphlib::Graph*, tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Node*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, bool*, std::pair<int, int>*, tt::graphlib::OpType*, bool)
E        --- tt::passes::can_commute_past_op(tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Graph*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, tt::graphlib::Node*)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x18fc4f) [0x7fa8a9fe8c4f]
E        --- tt::passes::erase_inverse_ops(tt::graphlib::Graph*)
E        --- tt::run_optimization_graph_passes(tt::graphlib::Graph*)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xfda39) [0x7fa8a9f56a39]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0cf5) [0x7fa8a9f39cf5]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x559f980b6b32]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x559f980a6a97]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x559f980c5172]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x559f980a197f]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:829: RuntimeError
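
The TT_ASSERT above fires inside commute_through_concat when a shape is indexed at dimension 3 even though the shape only carries three dimensions (valid indices 0-2). A minimal Python analogy of that out-of-range access, with made-up tensor shapes and no claim about the actual graph being compiled:

    import torch

    # Hypothetical 3-D activation; shape indices 0-2 are valid, index 3 is not.
    t = torch.zeros(2, 8, 768)
    try:
        _ = t.shape[3]
    except IndexError as e:
        print(f"out-of-range shape access: {e}")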

Check failure on line 64 in forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py

github-actions / TT-Forge-FE Tests

test_stable_diffusion_xl.test_stable_diffusion_generation[stable-diffusion-xl-base-1.0]

NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa848c15900>
variant = 'stable-diffusion-xl-base-1.0'

    @pytest.mark.nightly
    @pytest.mark.skip_model_analysis
    @pytest.mark.parametrize("variant", ["stable-diffusion-xl-base-1.0"])
    def test_stable_diffusion_generation(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load the pipeline and set it to use the CPU
        pipe = DiffusionPipeline.from_pretrained(f"stabilityai/{variant}", torch_dtype=torch.float32)  # Use float32 for CPU
        pipe.to("cpu")  # Move the model to CPU
    
        # Wrap the pipeline in the wrapper
        framework_model = StableDiffusionXLWrapper(pipe)
    
        # Tokenize the prompt to a tensor
        tokenizer = pipe.tokenizer
        prompt = "An astronaut riding a green horse"
        input_tensor = tokenizer(prompt, return_tensors="pt").input_ids
    
        inputs = [input_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py:64: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:678: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1021: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2074: in generate_forge_module
    framework_outputs = framework_mod.cpu_eval_forward(*pytorch_inputs)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/module.py:142: in cpu_eval_forward
    outputs = flatten_structured_output([outputs])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_utils.py:112: in flatten_structured_output
    sub_output = flatten_structured_output(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

outputs = [<PIL.Image.Image image mode=RGB size=1024x1024 at 0x7FA88CA54190>]

    def flatten_structured_output(outputs):
        from forge.tensor import Tensor
    
        new_outputs = []
    
        for i in range(len(outputs)):
            out = outputs[i]
    
            if isinstance(out, (list, tuple)):
                sub_output = flatten_structured_output(
                    out,
                )
                new_outputs += sub_output
    
            elif isinstance(out, dict):
                sub_output = []
                for k, v in out.items():
                    sub_output.append(v)
    
                sub_output = flatten_structured_output(
                    sub_output,
                )
                new_outputs += sub_output
    
            elif isinstance(out, (torch.Tensor, tf.Tensor, Tensor, np.ndarray)):
                new_outputs.append(out)
    
            elif out is None:
                continue
            else:
>               raise NotImplementedError(f"Unknown output type: {type(out)}")
E               NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_utils.py:133: NotImplementedError
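
flatten_structured_output only accepts torch/tf/Forge tensors, NumPy arrays, lists, tuples, dicts, and None, so the PIL.Image.Image returned by the diffusion pipeline is rejected outright. One possible workaround is to convert the image to a tensor inside the wrapper before it reaches the flattener; a minimal sketch, where pil_to_tensor is a hypothetical helper and not part of Forge:

    import numpy as np
    import torch
    from PIL import Image

    def pil_to_tensor(image: Image.Image) -> torch.Tensor:
        # Convert an HWC uint8 PIL image into a CHW float32 tensor in [0, 1]
        # so the wrapper's forward() only ever returns torch tensors.
        array = np.asarray(image, dtype=np.float32) / 255.0
        return torch.from_numpy(array).permute(2, 0, 1)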

Check failure on line 74 in forge/test/models/pytorch/text/codegen/test_codegen.py

github-actions / TT-Forge-FE Tests

test_codegen.test_codegen[Salesforce/codegen-350M-mono]

AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa7cb05d240>
variant = 'Salesforce/codegen-350M-mono'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_codegen(record_forge_property, variant):
        if variant != "Salesforce/codegen-350M-mono":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="codegen", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load model (with tokenizer)
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        framework_model = download_model(CodeGenForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)
    
        # Input prompt
        input_prompt = "def hello_world():"
    
        # Tokenize input
        inputs = tokenizer(
            input_prompt,
            return_tensors="pt",
            max_length=256,
            pad_to_max_length=True,
            truncation=True,
        )
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
    
        # Wrapper to get around attention mask
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_ids, attention_mask):
                return self.model(input_ids, None, attention_mask)
    
        framework_model = Wrapper(framework_model)
    
        # Sanity run
        input_ids = input_ids.to(torch.int32)
        attn_mask = attn_mask.to(torch.float32)
    
        inputs = [input_ids, attn_mask]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/text/codegen/test_codegen.py:74: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:678: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1021: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2140: in generate_forge_module
    verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

frame_outputs = [tensor([[[ 4.10871,  3.43310,  2.84573,  ..., -4.54513, -4.55066, -4.54938],
         [13.21362, 13.68193,  9.95741, ...75, -9.02421],
         [ 7.29722, 10.71369,  7.41228,  ..., -6.52333, -6.52313, -6.52258]]], grad_fn=<ViewBackward0>)]
forge_outputs = (Forge Tensor: tensor([[[ 4.10957,  3.43417,  2.84655,  ..., -4.54479, -4.55031, -4.54904],
         [13.21211, 13.679...    [ 8.90092, 12.67207, 12.04863,  ..., -8.01162, -8.00963, -8.01005]]], grad_fn=<AddBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)

    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
    
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
    
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:1987: AssertionError
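
The pass/fail above comes from compare_tensor_to_golden, which boils down to a closeness check between the framework output and the Forge-codegen output. A rough, hypothetical stand-in using a Pearson-correlation threshold (the 0.99 cutoff is an assumption, not the configured value):

    import torch

    def passes_pcc(golden: torch.Tensor, observed: torch.Tensor, threshold: float = 0.99) -> bool:
        # Flatten both outputs and require a high Pearson correlation between them.
        g = golden.detach().flatten().to(torch.float32)
        o = observed.detach().flatten().to(torch.float32)
        pcc = torch.corrcoef(torch.stack([g, o]))[0, 1]
        return bool(pcc >= threshold)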

Check failure on line 76 in forge/test/models/pytorch/text/gptneo/test_gptneo.py

github-actions / TT-Forge-FE Tests

test_gptneo.test_gptneo_causal_lm[EleutherAI/gpt-neo-125M]

AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa993dec5e0>
variant = 'EleutherAI/gpt-neo-125M'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_gptneo_causal_lm(record_forge_property, variant):
        if variant != "EleutherAI/gpt-neo-125M":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="gptneo", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Set random seed for repeatability
        torch.manual_seed(42)
    
        # Load tokenizer and model
        # Variants: # EleutherAI/gpt-neo-125M, EleutherAI/gpt-neo-1.3B,
        # EleutherAI/gpt-neo-2.7B
    
        config = download_model(GPTNeoConfig.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = GPTNeoConfig(**config_dict)
    
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
        model = download_model(GPTNeoForCausalLM.from_pretrained, variant, config=config)
    
        # Sample input text
        prompt = "My name is Bert, and I am"
    
        inputs = tokenizer(prompt, return_tensors="pt", max_length=256, pad_to_max_length=True, truncation=True)
    
        # Wrapper to get around attention mask
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_ids, attention_mask):
                return self.model(input_ids, None, attention_mask)
    
        framework_model = Wrapper(model)
    
        inputs = [inputs["input_ids"], inputs["attention_mask"]]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/text/gptneo/test_gptneo.py:76: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:678: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1021: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2140: in generate_forge_module
    verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

frame_outputs = [tensor([[[ -2.93149,  -3.04098,  -5.19186,  ..., -11.81308,  -8.60934,  -6.07345],
         [ -6.99371,  -5.94796, -1...],
         [-38.90412, -38.28091, -42.97676,  ..., -69.93178, -49.82716, -37.80742]]], grad_fn=<UnsafeViewBackward0>)]
forge_outputs = (Forge Tensor: tensor([[[ -2.93151,  -3.04100,  -5.19192,  ..., -11.81311,  -8.60940,  -6.07350],
         [ -6.99369,...2,  -5.60490,  -4.23273,  ..., -16.88002, -15.43275,  -9.50537]]], grad_fn=<UnsafeViewBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)

    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
    
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
    
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:1987: AssertionError
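
This is the same class of failure as the codegen case above. When triaging such mismatches it helps to quantify how far apart the two outputs actually are; a small hypothetical helper (the 1e-2 tolerance is arbitrary):

    import torch

    def summarize_mismatch(golden: torch.Tensor, observed: torch.Tensor, tol: float = 1e-2) -> str:
        # Report the largest and average absolute differences and how many elements exceed tol.
        diff = (golden.detach().float() - observed.detach().float()).abs()
        return (
            f"max abs diff = {diff.max().item():.5f}, "
            f"mean abs diff = {diff.mean().item():.5f}, "
            f"elements over {tol} = {int((diff > tol).sum())} / {diff.numel()}"
        )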

Check failure on line 21 in forge/test/models/pytorch/text/mistral/test_mistral.py

github-actions / TT-Forge-FE Tests

test_mistral.test_mistral[mistralai/Mistral-7B-v0.1]

NameError: name 'Task' is not defined
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa7cb05f1c0>
variant = 'mistralai/Mistral-7B-v0.1'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_mistral(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
>           framework=Framework.PYTORCH, model="mistral", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
E       NameError: name 'Task' is not defined

forge/test/models/pytorch/text/mistral/test_mistral.py:21: NameError
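
The NameError indicates test_mistral.py uses Task (and likely Framework and Source) without importing them. A probable fix is to pull them from the shared test utilities the other model tests use; the import path below is an assumption and may differ in the repo:

    # Assumed import, mirroring the other pytorch model tests in this suite:
    from test.models.utils import Framework, Source, Task, build_module_name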

Check failure on line 59 in forge/test/models/pytorch/text/t5/test_t5.py

github-actions / TT-Forge-FE Tests

test_t5.test_t5_generation[t5-base]

NameError: name 'AutoTokenizer' is not defined
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa67c352050>
variant = 't5-base'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_t5_generation(record_forge_property, variant):
        if variant not in {"t5-small", "google/flan-t5-small", "t5-base", "t5-large"}:
            pytest.skip(f"Skipping {variant} due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="t5", variant=variant, task=Task.TEXT_GENERATION, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load tokenizer and model from HuggingFace
        # Variants: t5-small, t5-base, t5-large
    
        config = download_model(T5Config.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = T5Config(**config_dict)
        model = download_model(T5ForConditionalGeneration.from_pretrained, variant, config=config)
>       tokenizer = AutoTokenizer.from_pretrained(variant)
E       NameError: name 'AutoTokenizer' is not defined

forge/test/models/pytorch/text/t5/test_t5.py:59: NameError
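
Here the cause is simply a missing import: test_t5.py calls AutoTokenizer.from_pretrained without importing AutoTokenizer. A minimal sketch of the fix (it could equally be routed through download_model like the other loads in this test):

    from transformers import AutoTokenizer

    variant = "t5-base"  # the failing parametrization above
    tokenizer = AutoTokenizer.from_pretrained(variant)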

Check failure on line 57 in forge/test/models/pytorch/text/xglm/test_xglm.py

github-actions / TT-Forge-FE Tests

test_xglm.test_xglm_causal_lm[facebook/xglm-564M]

RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:241: tt::exception
info:
1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
backtrace:
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x136165) [0x7fa8a8911165]
 --- tt::tt_metal::v0::Kernel::validate_runtime_args_size(unsigned long, unsigned long, tt::umd::xy_pair const&)
 --- tt::tt_metal::v0::Kernel::set_runtime_args(tt::umd::xy_pair const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x2dd603) [0x7fa8a8ab8603]
 --- tt::tt_metal::v0::SetRuntimeArgs(tt::tt_metal::v0::Program const&, unsigned int, std::variant<tt::umd::xy_pair, CoreRange, CoreRangeSet> const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
 --- ttnn::operations::data_movement::detail::concat_multi_core(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, unsigned int, tt::tt_metal::Tensor const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60c9a3) [0x7fa8955809a3]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a552a) [0x7fa89751952a]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a5934) [0x7fa897519934]
 --- void ttnn::device_operation::detail::launch_on_worker_thread<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, tt::tt_metal::v0::IDevice*>(unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, tt::tt_metal::v0::IDevice*&)
 --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::launch_on_single_device<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a4829) [0x7fa897518829]
 --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x259ec4f) [0x7fa897512c4f]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(_ZN2tt8tt_metal9operation19run_with_autoformatEONS1_15DeviceOperationISt6vectorINS0_6TensorESaIS4_EEEERKS6_RKS3_IN4ttnn10operations12experimental11auto_format12FormatParamsESaISF_EERKS3_INS0_6LayoutESaISK_EERKS3_ISt8optionalIKS4_ESaISR_EERKS3_ISP_ISF_ESaISW_EERKS3_ISP_IS4_ESaIS11_EEh+0x503) [0x7fa89750fb53]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60bb99) [0x7fa89557fb99]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25ab2e2) [0x7fa89751f2e2]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25abacc) [0x7fa89751facc]
 --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
 --- ttnn::operations::data_movement::concat_impl(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, long, unsigned int, tt::tt_metal::MemoryConfig const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x605f6b) [0x7fa895579f6b]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5e60e5) [0x7fa89555a0e5]
 --- ttnn::operations::data_movement::ConcatOperation::invoke(unsigned char, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
 --- ttnn::operations::data_movement::ConcatOperation::invoke(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57947c) [0x7fa8954ed47c]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57938b) [0x7fa8954ed38b]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5792fc) [0x7fa8954ed2fc]
 --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5790b4) [0x7fa8954ed0b4]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x578b2d) [0x7fa8954ecb2d]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x56f2f0) [0x7fa8954e32f0]
 --- ttnn::operations::data_movement::ExecuteRepeatInterleave::invoke(tt::tt_metal::Tensor const&, unsigned int, int, std::optional<tt::tt_metal::MemoryConfig> const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x7927a4) [0x7fa8a9b487a4]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x7924fa) [0x7fa8a9b484fa]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x79244c) [0x7fa8a9b4844c]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25ab2e2) [0x7fa89751f2e2]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25abacc) [0x7fa89751facc]
 --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x792113) [0x7fa8a9b48113]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x791b8d) [0x7fa8a9b47b8d]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x791687) [0x7fa8a9b47687]
 --- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::RepeatInterleaveOp const*, tt::runtime::ttnn::ProgramContext&)
 --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
 --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
 --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
 --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x3177e0) [0x7fa8aa1707e0]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x31772e) [0x7fa8aa17072e]
 --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0cf5) [0x7fa8a9f39cf5]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x559f980b6b32]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x559f980a6a97]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0xc4) [0x559f980ac574]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x559f980c5172]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x559f980a197f]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
 --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
 --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
 --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa67c352950>
variant = 'facebook/xglm-564M'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_xglm_causal_lm(record_forge_property, variant):
        if variant != "facebook/xglm-564M":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="xglm", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        config = XGLMConfig.from_pretrained(variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = XGLMConfig(**config_dict)
    
        framework_model = download_model(XGLMForCausalLM.from_pretrained, variant, config=config)
    
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
    
        # Input sample
        prefix_text = "My name is Thomas and my main"
        input_tokens = tokenizer(
            prefix_text,
            max_length=256,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
    
        inputs = [input_tokens["input_ids"], input_tokens["attention_mask"]]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/xglm/test_xglm.py:57: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fa87ada28f0>
inputs = (tensor([[    2,  1622,  4432,    67, 10128,    53,   363,  3270,     2,     2,     2,     2,     2,     2,     2,    ...0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
inputs_and_parameters = [tensor([[    2,  1622,  4432,    67, 10128,    53,   363,  3270,     2,     2,     2,     2,     2,     2,     2,    ... [0.00098],
         [0.00098],
         [0.00098],
         [0.00098],
         [0.00098],
         [0.00098]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:241: tt::exception
E       info:
E       1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
E       backtrace:
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x136165) [0x7fa8a8911165]
E        --- tt::tt_metal::v0::Kernel::validate_runtime_args_size(unsigned long, unsigned long, tt::umd::xy_pair const&)
E        --- tt::tt_metal::v0::Kernel::set_runtime_args(tt::umd::xy_pair const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libtt_metal.so(+0x2dd603) [0x7fa8a8ab8603]
E        --- tt::tt_metal::v0::SetRuntimeArgs(tt::tt_metal::v0::Program const&, unsigned int, std::variant<tt::umd::xy_pair, CoreRange, CoreRangeSet> const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E        --- ttnn::operations::data_movement::detail::concat_multi_core(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, unsigned int, tt::tt_metal::Tensor const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60c9a3) [0x7fa8955809a3]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a552a) [0x7fa89751952a]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a5934) [0x7fa897519934]
E        --- void ttnn::device_operation::detail::launch_on_worker_thread<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, tt::tt_metal::v0::IDevice*>(unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, tt::tt_metal::v0::IDevice*&)
E        --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::launch_on_single_device<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25a4829) [0x7fa897518829]
E        --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x259ec4f) [0x7fa897512c4f]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(_ZN2tt8tt_metal9operation19run_with_autoformatEONS1_15DeviceOperationISt6vectorINS0_6TensorESaIS4_EEEERKS6_RKS3_IN4ttnn10operations12experimental11auto_format12FormatParamsESaISF_EERKS3_INS0_6LayoutESaISK_EERKS3_ISt8optionalIKS4_ESaISR_EERKS3_ISP_ISF_ESaISW_EERKS3_ISP_IS4_ESaIS11_EEh+0x503) [0x7fa89750fb53]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x60bb99) [0x7fa89557fb99]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25ab2e2) [0x7fa89751f2e2]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25abacc) [0x7fa89751facc]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- ttnn::operations::data_movement::concat_impl(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, long, unsigned int, tt::tt_metal::MemoryConfig const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x605f6b) [0x7fa895579f6b]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5e60e5) [0x7fa89555a0e5]
E        --- ttnn::operations::data_movement::ConcatOperation::invoke(unsigned char, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
E        --- ttnn::operations::data_movement::ConcatOperation::invoke(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57947c) [0x7fa8954ed47c]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x57938b) [0x7fa8954ed38b]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5792fc) [0x7fa8954ed2fc]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x5790b4) [0x7fa8954ed0b4]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x578b2d) [0x7fa8954ecb2d]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x56f2f0) [0x7fa8954e32f0]
E        --- ttnn::operations::data_movement::ExecuteRepeatInterleave::invoke(tt::tt_metal::Tensor const&, unsigned int, int, std::optional<tt::tt_metal::MemoryConfig> const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x7927a4) [0x7fa8a9b487a4]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x7924fa) [0x7fa8a9b484fa]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x79244c) [0x7fa8a9b4844c]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25ab2e2) [0x7fa89751f2e2]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/_ttnn.so(+0x25abacc) [0x7fa89751facc]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x792113) [0x7fa8a9b48113]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x791b8d) [0x7fa8a9b47b8d]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/lib/libTTMLIR.so(+0x791687) [0x7fa8a9b47687]
E        --- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::RepeatInterleaveOp const*, tt::runtime::ttnn::ProgramContext&)
E        --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
E        --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x3177e0) [0x7fa8aa1707e0]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x31772e) [0x7fa8aa17072e]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0cf5) [0x7fa8a9f39cf5]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x559f980b6b32]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x559f980a6a97]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0xc4) [0x559f980ac574]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x559f980c5172]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x559f980ad39b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x559f980a799e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x559f980a197f]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x559f980a6702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x559f980ac61d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x559f980c162c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x559f981c9464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x559f980c510b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x559f980a3c30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x559f980a1790]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x559f980c44d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x559f980a29ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x559f980b738c]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
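
Triage note: this failure is hit at run time, not at compile time. The compiled graph lowers XGLM's repeat_interleave to a multi-input ttnn concat, and the generated reader_concat_stick_layout_interleaved_start_id kernel ends up with 1283 runtime args on core (x=0,y=0), well above tt-metal's 256-arg limit. Until that limit is lifted or the lowering is changed, one possible stopgap is to keep the variant in the nightly run but mark it as a known failure; the decorator below is only a sketch of that idea, not a committed change:

    import pytest

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    @pytest.mark.xfail(
        reason="ttnn concat lowered from repeat_interleave exceeds the 256 runtime-arg limit "
        "(tt-metal kernel.cpp:241)",
        strict=False,  # keep running so the test reports again once the limit is addressed
    )
    def test_xglm_causal_lm(record_forge_property, variant):
        ...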

Check failure on line 59 in forge/test/models/pytorch/vision/alexnet/test_alexnet.py

@github-actions github-actions / TT-Forge-FE Tests

test_alexnet.test_alexnet_torchhub

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa993dee7a0>

    @pytest.mark.nightly
    def test_alexnet_torchhub(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="alexnet",
            variant="alexnet",
            source=Source.TORCH_HUB,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load model
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "alexnet", pretrained=True)
        framework_model.eval()
    
        # Load and pre-process image
        try:
            torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            input_image = Image.open("dog.jpg")
            preprocess = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ]
            )
            img_tensor = preprocess(input_image).unsqueeze(0)
        except:
            logger.warning(
                "Failed to download the image file, replacing input with random tensor. Please check if the URL is up to date"
            )
            img_tensor = torch.rand(1, 3, 224, 224)
    
        inputs = [img_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/alexnet/test_alexnet.py:59: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_alexnet_alexnet_img_cls_torchhub], graph_name='pt_alexnet_alexnet_img_cls_torchhub',...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7fa997f7fcf0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
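
Triage note: here the pipeline fails inside run_mlir_compiler, before any binary exists, so the problem is in lowering rather than on device. The log does not say which op trips the pass pipeline, so a reasonable next step is to bisect the model. The sketch below (module names are made up for illustration) compiles AlexNet's convolutional trunk and its classifier head separately; whichever half reproduces "Failed to run MLIR compiler pass pipeline" narrows the search:

    import torch
    import torchvision
    import forge

    model = torchvision.models.alexnet(pretrained=True).eval()

    # Convolutional trunk: the conv/relu/maxpool stack plus the adaptive average pool.
    trunk = torch.nn.Sequential(model.features, model.avgpool)
    forge.compile(
        trunk,
        sample_inputs=[torch.rand(1, 3, 224, 224)],
        module_name="pt_alexnet_trunk_bisect",  # hypothetical module name
    )

    # Classifier head: flatten the 256x6x6 feature map and run the dense layers.
    head = torch.nn.Sequential(torch.nn.Flatten(), model.classifier)
    forge.compile(
        head,
        sample_inputs=[torch.rand(1, 256, 6, 6)],
        module_name="pt_alexnet_head_bisect",  # hypothetical module name
    )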

Check failure on line 114 in forge/test/models/pytorch/vision/densenet/test_densenet.py

@github-actions github-actions / TT-Forge-FE Tests

test_densenet.test_densenet_169_pytorch[densenet169]

RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa993cb3b50>
variant = 'densenet169'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["densenet169"])
    def test_densenet_169_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="densenet",
            variant=variant,
            source=Source.TORCHVISION,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # STEP 2: Create Forge module from PyTorch model
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet169", pretrained=True)
    
        # STEP 3: Run inference on Tenstorrent device
        img_tensor = get_input_img()
    
        inputs = [img_tensor]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/densenet/test_densenet.py:114: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7fa997ec0d60>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
          [-1.28157, -1.42100, -1.22928,  ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241...2.10225, 1.20796, 2.29127, 1.71784, 2.46689, 2.76570, 1.49687, 1.84175, 1.53622, 2.85604]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
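
Triage note: the expected strides [150528, 50176, 224, 1] are exactly those of a contiguous NCHW tensor of shape (1, 3, 224, 224), while the observed strides [3, 1, 672, 3] look like a channels-last view of the same data, so the compiled binary appears to be handed a non-contiguous input. Assuming get_input_img() is where the channels-last layout comes from, forcing a contiguous copy before verification may be enough; a minimal sketch:

    # Normalise the input layout before handing it to the compiled model.
    img_tensor = get_input_img()
    print(img_tensor.stride())            # expected to show a channels-last-style stride here
    img_tensor = img_tensor.contiguous()  # (150528, 50176, 224, 1) for shape (1, 3, 224, 224)

    inputs = [img_tensor]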

Check failure on line 93 in forge/test/models/pytorch/vision/rcnn/test_rcnn.py

@github-actions github-actions / TT-Forge-FE Tests

test_rcnn.test_rcnn_pytorch

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa848790670>

    @pytest.mark.nightly
    def test_rcnn_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="rcnn", source=Source.TORCHVISION, task=Task.OBJECT_DETECTION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load Alexnet Model
        framework_model = torchvision.models.alexnet(pretrained=True)
        num_classes = 2
        num_features = framework_model.classifier[6].in_features
    
        # Create class specific linear SVMs [Refer Section 2 in paper]
        svm_layer = nn.Linear(num_features, num_classes)
    
        # Replace AlexNet's ImageNet-specific 1000-way classification layer with a randomly initialized (N + 1)-way classification layer (where N is the number of object classes, plus 1 for background)
        # [Refer Section 2.3.Domain-specific fine-tuning in Paper]
        init.normal_(svm_layer.weight, mean=0, std=0.01)
        init.constant_(svm_layer.bias, 0)
        framework_model.classifier[6] = svm_layer
    
        framework_model.eval()
    
        # Cancel gradient tracking
        for param in framework_model.parameters():
            param.requires_grad = False
    
        # Image
        img = cv2.imread("forge/test/models/files/samples/images/car.jpg")
    
        transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize((227, 227)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]
        )
    
        # Selective search - a separate tool for generating proposals (potential regions that might contain objects) that can be fed to the actual model.
        # As it is a pre-processing step, it is implemented on the CPU.
        gs = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
        gs.setBaseImage(img)
        gs.switchToSelectiveSearchFast()
        rects = gs.process()
        rects[:, 2] += rects[:, 0]
        rects[:, 3] += rects[:, 1]
        print("Suggested number of proposals: %d" % len(rects))
    
        # Proposals generated by selective search were fed to a model in a loop manner to compute features.
        # [Refer line No.151 in https://github.com/object-detection-algorithm/R-CNN/blob/master/py/car_detector.py]
        for idx, rect in enumerate(rects):
    
            xmin, ymin, xmax, ymax = rect
            rect_img = img[ymin:ymax, xmin:xmax]
    
            rect_transform = transform(rect_img)
    
            inputs = [rect_transform.unsqueeze(0)]
    
            # Build Module Name
            module_name = build_module_name(
                framework=Framework.PYTORCH,
                model="rcnn",
                suffix=f"rect_{idx}",
                source=Source.TORCHVISION,
                task=Task.OBJECT_DETECTION,
            )
    
            # Forge compile framework model
>           compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/rcnn/test_rcnn.py:93: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_rcnn_base_obj_det_torchvision_rect_0], graph_name='pt_rcnn_base_obj_det_torchvision_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7fa67c25dc30>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
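
Triage note: the compile already fails on the first proposal (rect_0), so selective search and the proposal loop are most likely irrelevant to the failure; the same AlexNet backbone with a 2-way head and any 1x3x227x227 input should reproduce it. A reduced repro sketch (module name is made up):

    import torch
    import torch.nn as nn
    import torchvision
    import forge

    # Same AlexNet-based classifier as the test, with a fixed random crop instead
    # of selective-search proposals.
    model = torchvision.models.alexnet(pretrained=True)
    model.classifier[6] = nn.Linear(model.classifier[6].in_features, 2)
    model.eval()

    forge.compile(
        model,
        sample_inputs=[torch.rand(1, 3, 227, 227)],
        module_name="pt_rcnn_fixed_crop_repro",  # hypothetical module name
    )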

Check failure on line 61 in forge/test/models/pytorch/vision/segformer/test_segformer.py

@github-actions github-actions / TT-Forge-FE Tests

test_segformer.test_segformer_image_classification_pytorch[nvidia/mit-b0]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa68443af80>
variant = 'nvidia/mit-b0'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants_img_classification)
    def test_segformer_image_classification_pytorch(record_forge_property, variant):
        if variant != "nvidia/mit-b0":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="segformer",
            variant=variant,
            task=Task.IMAGE_CLASSIFICATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Set model configurations
        config = SegformerConfig.from_pretrained(variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config = SegformerConfig(**config_dict)
    
        # Load the model from HuggingFace
        framework_model = SegformerForImageClassification.from_pretrained(variant, config=config)
        framework_model.eval()
    
        # Load the sample image
        pixel_values = get_sample_data(variant)
        inputs = [pixel_values]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/segformer/test_segformer.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_segformer_nvidia_mit_b0_img_cls_hf], graph_name='pt_segformer_nvidia_mit_b0_img_cls_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7fa69065bcf0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
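
Triage note: as with the AlexNet and R-CNN failures above, the error comes from run_mlir_compiler with no indication of the offending op. One way to narrow it down is to compile the bare Segformer encoder without the classification head; the sketch below assumes a 224x224 input and that the encoder alone reproduces the pass failure (both are assumptions, not facts from this log):

    import torch
    import forge
    from transformers import SegformerConfig, SegformerModel

    config = SegformerConfig.from_pretrained("nvidia/mit-b0")
    config.return_dict = False

    encoder = SegformerModel.from_pretrained("nvidia/mit-b0", config=config)
    encoder.eval()

    forge.compile(
        encoder,
        sample_inputs=[torch.rand(1, 3, 224, 224)],  # input resolution is an assumption
        module_name="pt_segformer_mit_b0_encoder_bisect",  # hypothetical module name
    )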

Check failure on line 134 in forge/test/models/pytorch/vision/yolo/test_yolo_v5.py

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v5.test_yolov5_480x480[yolov5s]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7fa993cb2680>
size = 's'

    @pytest.mark.nightly
    @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size])
    def test_yolov5_480x480(record_forge_property, size):
        if size != "s":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v5",
            variant="yolov5" + size,
            task="imgcls",
            source="torchhub",
            suffix="480x480",
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_yoloV5I480_imgcls_torchhub_pytorch(
            "ultralytics/yolov5",
            size=size,
        )
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v5.py:134: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v5_yolov5s_imgcls_torchhub_480x480], graph_name='pt_yolo_v5_yolov5s_imgcls_torc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7fa997fe24b0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
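
Triage note: this one fails a step earlier than the MLIR pass-pipeline errors above: at least one op in the traced YOLOv5s forward graph has no TTForge-to-TTIR lowering yet. A standalone repro outside the parametrized harness is handy for attaching to a lowering-support issue; the sketch below mirrors the test's torch.hub entry point and 480x480 input, but treat both as assumptions about generate_model_yoloV5I480_imgcls_torchhub_pytorch rather than facts from this log:

    import torch
    import forge

    model = torch.hub.load("ultralytics/yolov5", "yolov5s", pretrained=True)
    model.eval()

    forge.compile(
        model,
        sample_inputs=[torch.rand(1, 3, 480, 480)],
        module_name="pt_yolo_v5_yolov5s_480x480_repro",  # hypothetical module name
    )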