On PR #2296

GitHub Actions / TT-Forge-FE Tests failed Jan 31, 2025 in 0s

96 tests run, 6 passed, 77 skipped, 13 failed.

Annotations

Check failure on line 46 in forge/test/models/pytorch/audio/stereo/test_stereo.py


test_stereo.test_stereo[facebook/musicgen-small]

RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
info:
Trying to access element outside of dimensions: 3
backtrace: (identical to the backtrace in the raw output below)
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1a280a89d0>
variant = 'facebook/musicgen-small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_stereo(record_forge_property, variant):
        if variant != "facebook/musicgen-small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, processor = load_model(variant)
    
        input_ids, attn_mask, decoder_input_ids = load_inputs(framework_model, processor)
        inputs = [input_ids, attn_mask, decoder_input_ids]
    
        # Issue: https://github.com/tenstorrent/tt-forge-fe/issues/615
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/audio/stereo/test_stereo.py:46: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_stereo_facebook_musicgen_small_music_generation_hf], graph_name='pt_stereo_facebook_...in_recompile=False, recompile_count=0, target_cycles_offset=0, forge_module=None, compiled_binary=None, attach_to=None)

    def run_optimization_pass(context: CompileContext) -> CompileDepth:
        """
        Runs optimization passes.
    
        Parameters
        ----------
        context: CompileContext
            Compile context
    
        Returns
        -------
        CompileDepth - next compile stage
        """
        compiler_cfg = context.compiler_cfg
        graph_name = context.graph_name
        graph, intermediate_tensors = context.graph, context.intermediate_tensors
    
>       run_optimization_graph_passes(graph)
E       RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
E       info:
E       Trying to access element outside of dimensions: 3
E       backtrace:
E        --- tt::graphlib::Shape::operator[](int)
E        --- tt::passes::commute_through_concat(tt::graphlib::Graph*, tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Node*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, bool*, std::pair<int, int>*, tt::graphlib::OpType*, bool)
E        --- tt::passes::can_commute_past_op(tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Graph*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, tt::graphlib::Node*)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0x18fc4f) [0x7f1a753e8c4f]
E        --- tt::passes::erase_inverse_ops(tt::graphlib::Graph*)
E        --- tt::run_optimization_graph_passes(tt::graphlib::Graph*)
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xfda39) [0x7f1a75356a39]
E        --- /opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/_C.so(+0xe0cf5) [0x7f1a75339cf5]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x55675438db32]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55675438439b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x55675437da97]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x55675439c172]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55675439b4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55675438361d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55675439862c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x5567544a0464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55675438439b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55675437e99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x55675437897f]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55675439b4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55675438361d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55675439862c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x5567544a0464]
E        --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x55675439c10b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55675439b4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55675439b4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55675438361d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55675439862c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x5567544a0464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55675438439b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55675437e99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x55675439b4d1]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x55675437d702]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x55675438361d]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x55675439862c]
E        --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x5567544a0464]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x55675438439b]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x55675437e99e]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x556754378790]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x55675437ac30]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x5567543799ab]
E        --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x55675438e38c]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:829: RuntimeError
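
For orientation: the assert fires inside the erase-inverse-ops optimization pass when commute_through_concat indexes a Shape dimension that does not exist. A minimal Python analog, assuming a 3-D MusicGen activation (the concrete shape is not in the log):

import torch

# Hypothetical 3-D activation; the real shape behind the assert is not logged.
shape = torch.Size([2, 8, 2048])
dim = 3  # index requested while commuting through the concat

# Mirrors the C++ TT_ASSERT (i >= 0) && (i < (int)dims_.size())
if not (0 <= dim < len(shape)):
    raise RuntimeError(f"Trying to access element outside of dimensions: {dim}")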

Check failure on line 64 in forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py


test_stable_diffusion_xl.test_stable_diffusion_generation[stable-diffusion-xl-base-1.0]

NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1a14f4bd00>
variant = 'stable-diffusion-xl-base-1.0'

    @pytest.mark.nightly
    @pytest.mark.skip_model_analysis
    @pytest.mark.parametrize("variant", ["stable-diffusion-xl-base-1.0"])
    def test_stable_diffusion_generation(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load the pipeline and set it to use the CPU
        pipe = DiffusionPipeline.from_pretrained(f"stabilityai/{variant}", torch_dtype=torch.float32)  # Use float32 for CPU
        pipe.to("cpu")  # Move the model to CPU
    
        # Wrap the pipeline in the wrapper
        framework_model = StableDiffusionXLWrapper(pipe)
    
        # Tokenize the prompt to a tensor
        tokenizer = pipe.tokenizer
        prompt = "An astronaut riding a green horse"
        input_tensor = tokenizer(prompt, return_tensors="pt").input_ids
    
        inputs = [input_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py:64: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:678: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1021: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2074: in generate_forge_module
    framework_outputs = framework_mod.cpu_eval_forward(*pytorch_inputs)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/module.py:142: in cpu_eval_forward
    outputs = flatten_structured_output([outputs])
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_utils.py:112: in flatten_structured_output
    sub_output = flatten_structured_output(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

outputs = [<PIL.Image.Image image mode=RGB size=1024x1024 at 0x7F1A14FABF40>]

    def flatten_structured_output(outputs):
        from forge.tensor import Tensor
    
        new_outputs = []
    
        for i in range(len(outputs)):
            out = outputs[i]
    
            if isinstance(out, (list, tuple)):
                sub_output = flatten_structured_output(
                    out,
                )
                new_outputs += sub_output
    
            elif isinstance(out, dict):
                sub_output = []
                for k, v in out.items():
                    sub_output.append(v)
    
                sub_output = flatten_structured_output(
                    sub_output,
                )
                new_outputs += sub_output
    
            elif isinstance(out, (torch.Tensor, tf.Tensor, Tensor, np.ndarray)):
                new_outputs.append(out)
    
            elif out is None:
                continue
            else:
>               raise NotImplementedError(f"Unknown output type: {type(out)}")
E               NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_utils.py:133: NotImplementedError
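
flatten_structured_output only accepts torch/tf/forge/numpy tensors (plus nested lists, tuples, dicts, and None), so any pipeline whose forward returns a PIL.Image lands in the else branch above. A hedged workaround sketch: convert the image to a tensor before it leaves the wrapper (pil_to_tensor is a hypothetical helper, not Forge API):

import numpy as np
import torch
from PIL import Image

def pil_to_tensor(img: Image.Image) -> torch.Tensor:
    # HWC uint8 -> NCHW float32 in [0, 1]
    arr = np.asarray(img, dtype=np.float32) / 255.0
    return torch.from_numpy(arr).permute(2, 0, 1).unsqueeze(0)

# e.g. in the wrapper's forward: return pil_to_tensor(self.pipe(...).images[0])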

Check failure on line 74 in forge/test/models/pytorch/text/codegen/test_codegen.py


test_codegen.test_codegen[Salesforce/codegen-350M-mono]

AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f198fac7490>
variant = 'Salesforce/codegen-350M-mono'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_codegen(record_forge_property, variant):
        if variant != "Salesforce/codegen-350M-mono":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="codegen", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load model (with tokenizer)
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        framework_model = download_model(CodeGenForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)
    
        # Input prompt
        input_prompt = "def hello_world():"
    
        # Tokenize input
        inputs = tokenizer(
            input_prompt,
            return_tensors="pt",
            max_length=256,
            pad_to_max_length=True,
            truncation=True,
        )
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
    
        # Wrapper to get around attention mask
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_ids, attention_mask):
                return self.model(input_ids, None, attention_mask)
    
        framework_model = Wrapper(framework_model)
    
        # Sanity run
        input_ids = input_ids.to(torch.int32)
        attn_mask = attn_mask.to(torch.float32)
    
        inputs = [input_ids, attn_mask]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/text/codegen/test_codegen.py:74: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:678: in generate_initial_graph
    module, module_inputs = convert_to_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:1021: in convert_to_forge_module
    forge_module, dev_types, module_inputs = generate_forge_module(
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:2140: in generate_forge_module
    verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

frame_outputs = [tensor([[[ 4.10871,  3.43310,  2.84573,  ..., -4.54513, -4.55066, -4.54938],
         [13.21362, 13.68193,  9.95741, ...75, -9.02421],
         [ 7.29722, 10.71369,  7.41228,  ..., -6.52333, -6.52313, -6.52258]]], grad_fn=<ViewBackward0>)]
forge_outputs = (Forge Tensor: tensor([[[ 4.10957,  3.43417,  2.84655,  ..., -4.54479, -4.55031, -4.54904],
         [13.21211, 13.679...    [ 8.90092, 12.67207, 12.04863,  ..., -8.01162, -8.00963, -8.01005]]], grad_fn=<AddBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)

    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
    
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
    
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/tvm_to_python.py:1987: AssertionError
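
compare_tensor_to_golden's internals are not in the log; below is a rough sketch of a correlation-style (PCC) golden check in the same spirit, with the metric and threshold assumed. Note the visible leading elements actually agree closely; the divergence shows up in later rows (e.g. 7.29722 framework vs 8.90092 Forge):

import torch

def pcc(golden: torch.Tensor, observed: torch.Tensor) -> float:
    # Pearson correlation over flattened tensors (assumed metric; 0.99 is an assumed threshold)
    stacked = torch.stack([golden.flatten().double(), observed.flatten().double()])
    return torch.corrcoef(stacked)[0, 1].item()

golden = torch.tensor([7.29722, 10.71369, 7.41228])     # framework row from the log
observed = torch.tensor([8.90092, 12.67207, 12.04863])  # Forge row from the log
print(pcc(golden, observed))  # well below 1.0 -> "Data mismatch on output 0"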

Check failure on line 368 in forge/test/models/pytorch/text/gemma/test_gemma_2b.py


test_gemma_2b.test_gemma_2b[google/gemma-2b]

RuntimeError: Input count mismatch: expected 366, got 367
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1b63bff6d0>
variant = 'google/gemma-2b'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_gemma_2b(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="gemma",
            variant=variant,
            source=Source.HUGGINGFACE,
            task=Task.TEXT_GENERATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Random seed for reproducibility
        torch.manual_seed(42)
    
        config = download_model(GemmaConfig.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = GemmaConfig(**config_dict)
        framework_model = download_model(GemmaForCausalLM.from_pretrained, variant, config=config)
    
        # Load tokenizer
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
    
        # Sample input
        prompt = "What is your favorite city?"
        inputs = tokenizer(prompt, return_tensors="pt")
    
        # Sanity run
        generate_ids = framework_model.generate(inputs.input_ids, max_length=30)
        generated_text = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[
            0
        ]
    
        print(f"Sanity run generated text: {generated_text}")
    
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
    
        inputs = [input_ids, attn_mask]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/gemma/test_gemma_2b.py:368: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f1a1489b220>
inputs = (tensor([[     2,   1841,    603,    861,   7827,   3413, 235336]]), tensor([[1, 1, 1, 1, 1, 1, 1]]))
inputs_and_parameters = [tensor([[     2,   1841,    603,    861,   7827,   3413, 235336]]), tensor([[1, 1, 1, 1, 1, 1, 1]]), tensor([45.25483...7e-03, 1.06697e-03, 9.92890e-04, 9.23956e-04, 8.59808e-04, 8.00113e-04, 7.44563e-04, 6.92869e-04, 6.44765e-04]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Input count mismatch: expected 366, got 367

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
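
run_binary checks the assembled tensor list (activations + constants + parameters) against the count the program was compiled for. Only the totals appear in the log; the split below is illustrative:

# Illustrative accounting; only expected=366 and got=367 come from the log.
activations = 2           # input_ids, attn_mask
constants = 100           # hypothetical
parameters = 265          # hypothetical
assembled = activations + constants + parameters  # 367
expected = 366                                    # baked into the compiled binary
print(f"extra tensors passed: {assembled - expected}")  # 1 -> e.g. one unconsumed input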

Check failure on line 168 in forge/test/models/pytorch/text/t5/test_t5.py


test_t5.test_t5_generation[t5-small]

ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[-21.42685,  -7.10311, -13.34143,  ..., -40.87328, -40.79771, -40.71077]]], grad_fn=<UnsafeViewBackward0>), compiled_model=tensor([[[ 15.12560,  -7.13685,  -9.70421,  ..., -35.87063, -35.87774, -35.95815]]])
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1a1480bb50>
variant = 't5-small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_t5_generation(record_forge_property, variant):
        if variant not in {"t5-small", "google/flan-t5-small", "t5-base", "t5-large"}:
            pytest.skip(f"Skipping {variant} due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="t5", variant=variant, task=Task.TEXT_GENERATION, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load tokenizer and model from HuggingFace
        # Variants: t5-small, t5-base, t5-large
    
        config = download_model(T5Config.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = T5Config(**config_dict)
        model = download_model(T5ForConditionalGeneration.from_pretrained, variant, config=config)
        tokenizer = AutoTokenizer.from_pretrained(variant)
    
        inputs = tokenizer(
            "summarize: Researchers have extensively studied the benefits of having pets, "
            "particularly dogs, on human health and well-being. Findings suggest that pet ownership "
            "can lead to improved mental health, reduced stress levels, and even physical health benefits "
            "such as lower blood pressure and increased physical activity levels due to regular walks.",
            return_tensors="pt",
        )
    
        input_ids = inputs.input_ids
        decoder_start_token_tensor = torch.tensor(model.generation_config.decoder_start_token_id, dtype=torch.long)
        decoder_input_ids = torch.ones((1, 1), dtype=torch.long) * decoder_start_token_tensor
        inputs = [input_ids, decoder_input_ids]
    
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
    
            def forward(self, input_ids, decoder_input_ids):
                inputs = {"input_ids": input_ids, "decoder_input_ids": decoder_input_ids}
                output = self.model(**inputs)
                return output
    
        framework_model = Wrapper(model)
    
        # Forge compile
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/t5/test_t5.py:168: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:333: in verify
    verify_cfg.value_checker.check(fw, co)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.verify.value_checkers.AutomaticValueChecker object at 0x7f1a5bac0190>
fw_out = tensor([[[-21.42685,  -7.10311, -13.34143,  ..., -40.87328, -40.79771, -40.71077]]], grad_fn=<UnsafeViewBackward0>)
co_out = tensor([[[ 15.12560,  -7.13685,  -9.70421,  ..., -35.87063, -35.87774, -35.95815]]])

    def check(self, fw_out, co_out):
        if not compare_with_golden(fw_out, co_out, self.pcc, self.rtol, self.atol, self.dissimilarity_threshold):
>           raise ValueError(
                f"Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model={fw_out}, compiled_model={co_out}"
            )
E           ValueError: Data mismatch -> AutomaticValueChecker (compare_with_golden): framework_model=tensor([[[-21.42685,  -7.10311, -13.34143,  ..., -40.87328, -40.79771, -40.71077]]], grad_fn=<UnsafeViewBackward0>), compiled_model=tensor([[[ 15.12560,  -7.13685,  -9.70421,  ..., -35.87063, -35.87774, -35.95815]]])

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/value_checkers.py:38: ValueError
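
A quick triage sketch on the three visible logits: the first one flips sign (-21.43 framework vs +15.13 compiled), so this reads as functional divergence rather than a tolerance issue (tolerances below are assumptions):

import torch

fw = torch.tensor([-21.42685, -7.10311, -13.34143])  # framework logits from the log
co = torch.tensor([15.12560, -7.13685, -9.70421])    # compiled logits from the log
print(torch.isclose(fw, co, rtol=1e-2, atol=1e-2))   # only the middle element is close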

Check failure on line 59 in forge/test/models/pytorch/vision/alexnet/test_alexnet.py


test_alexnet.test_alexnet_torchhub

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1a144fa200>

    @pytest.mark.nightly
    def test_alexnet_torchhub(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="alexnet",
            variant="alexnet",
            source=Source.TORCH_HUB,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load model
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "alexnet", pretrained=True)
        framework_model.eval()
    
        # Load and pre-process image
        try:
            torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
            input_image = Image.open("dog.jpg")
            preprocess = transforms.Compose(
                [
                    transforms.Resize(256),
                    transforms.CenterCrop(224),
                    transforms.ToTensor(),
                    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ]
            )
            img_tensor = preprocess(input_image).unsqueeze(0)
        except:
            logger.warning(
                "Failed to download the image file, replacing input with random tensor. Please check if the URL is up to date"
            )
            img_tensor = torch.rand(1, 3, 224, 224)
    
        inputs = [img_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/alexnet/test_alexnet.py:59: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_alexnet_alexnet_img_cls_torchhub], graph_name='pt_alexnet_alexnet_img_cls_torchhub',...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f185a44ed70>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
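
Nothing in the log names the failing MLIR pass, so one hedged way to localize it is to bisect the AlexNet feature stack and compile growing prefixes (forge.compile used as in the test above; the call is left commented so the sketch runs standalone):

import torch
import torchvision

base = torchvision.models.alexnet(weights=None).eval()
x = torch.rand(1, 3, 224, 224)
for cut in range(1, len(base.features) + 1):
    prefix = torch.nn.Sequential(*list(base.features)[:cut])
    print(cut, tuple(prefix(x).shape))  # sanity: the prefix runs on CPU
    # forge.compile(prefix, sample_inputs=[x], module_name=f"pt_alexnet_prefix_{cut}")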

Check failure on line 114 in forge/test/models/pytorch/vision/densenet/test_densenet.py


test_densenet.test_densenet_169_pytorch[densenet169]

RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1a145555a0>
variant = 'densenet169'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["densenet169"])
    def test_densenet_169_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="densenet",
            variant=variant,
            source=Source.TORCHVISION,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # STEP 2: Create Forge module from PyTorch model
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet169", pretrained=True)
    
        # STEP 3: Run inference on Tenstorrent device
        img_tensor = get_input_img()
    
        inputs = [img_tensor]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/densenet/test_densenet.py:114: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f198f289180>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
          [-1.28157, -1.42100, -1.22928,  ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241...2.10225, 1.20796, 2.29127, 1.71784, 2.46689, 2.76570, 1.49687, 1.84175, 1.53622, 2.85604]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
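
The "got" strides [3, 1, 672, 3] are a channels-last layout of a [1, 3, 224, 224] image (the batch stride differs only in the size-1 dim, where it is arbitrary), while the runtime expects row-major strides. A hedged repro of the mismatch and the .contiguous() fix:

import torch

nhwc = torch.rand(1, 224, 224, 3)       # image decoded as HWC plus batch dim
nchw_view = nhwc.permute(0, 3, 1, 2)    # shape [1, 3, 224, 224], channels-last strides
print(nchw_view.stride())               # (150528, 1, 672, 3)
print(nchw_view.contiguous().stride())  # (150528, 50176, 224, 1) -- the expected strides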

Check failure on line 174 in forge/test/models/pytorch/vision/hrnet/test_hrnet.py


test_hrnet.test_hrnet_timm_pytorch[hrnet_w18_small]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1991cc3520>
variant = 'hrnet_w18_small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_hrnet_timm_pytorch(record_forge_property, variant):
        if variant != "hrnet_w18_small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="hrnet", variant=variant, source=Source.TIMM, task=Task.POSE_ESTIMATION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_hrnet_imgcls_timm_pytorch(
            variant,
        )
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/hrnet/test_hrnet.py:174: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_hrnet_hrnet_w18_small_pose_estimation_timm], graph_name='pt_hrnet_hrnet_w18_small_po...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f1a45152ef0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
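
The log does not say which ops fail to lower, so a hedged triage sketch is to inventory the distinct ops in the traced model and shortlist candidates (symbolic_trace may fail on models with dynamic control flow):

import timm
import torch

model = timm.create_model("hrnet_w18_small", pretrained=False).eval()
traced = torch.fx.symbolic_trace(model)  # may not trace every timm model
ops = {str(n.target) for n in traced.graph.nodes if n.op in ("call_function", "call_method")}
print(sorted(ops))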

Check failure on line 48 in forge/test/models/pytorch/vision/inception/test_inception_v4.py


test_inception_v4.test_inception_v4_osmr_pytorch

RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1991cc23b0>

    @pytest.mark.nightly
    def test_inception_v4_osmr_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.OSMR, task=Task.IMAGE_CLASSIFICATION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs = generate_model_inceptionV4_imgcls_osmr_pytorch("inceptionv4")
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/inception/test_inception_v4.py:48: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f1b6545bee0>
inputs = (tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940... ..., -0.63669, -0.95041, -0.63669],
          [-0.60183, -0.79355, -1.05499,  ..., -0.49725, -1.10728, -0.91556]]]]),)
inputs_and_parameters = [tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940...0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.]]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compiled_graph_state.py:253: RuntimeError
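
Strides [0, 0] identify tensor 47 as a broadcast (expand) view with no real storage behind it; materializing it restores the expected strides. The [1225, 1225] shape below is an assumption consistent with 35 * 35 = 1225:

import torch

t = torch.zeros(1, 1).expand(1225, 1225)  # hypothetical broadcast constant
print(t.stride())                          # (0, 0)
print(t.contiguous().stride())             # (1225, 1) -- the expected strides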

Check failure on line 93 in forge/test/models/pytorch/vision/rcnn/test_rcnn.py


test_rcnn.test_rcnn_pytorch

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1982569f30>

    @pytest.mark.nightly
    def test_rcnn_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="rcnn", source=Source.TORCHVISION, task=Task.OBJECT_DETECTION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load Alexnet Model
        framework_model = torchvision.models.alexnet(pretrained=True)
        num_classes = 2
        num_features = framework_model.classifier[6].in_features
    
        # Create class specific linear SVMs [Refer Section 2 in paper]
        svm_layer = nn.Linear(num_features, num_classes)
    
        # Replacing AlexNet's ImageNet-specific 1000-way classification layer with a randomly initialized (N + 1)-way classification layer (where N is the number of object classes, plus 1 for background)
        # [Refer Section 2.3, Domain-specific fine-tuning, in the paper]
        init.normal_(svm_layer.weight, mean=0, std=0.01)
        init.constant_(svm_layer.bias, 0)
        framework_model.classifier[6] = svm_layer
    
        framework_model.eval()
    
        # Disable gradient tracking
        for param in framework_model.parameters():
            param.requires_grad = False
    
        # Load the input image
        img = cv2.imread("forge/test/models/files/samples/images/car.jpg")
    
        transform = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Resize((227, 227)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
            ]
        )
    
        # Selective search: a separate tool that generates proposals (potential regions that might contain objects) to feed to the actual model
        # As it is a pre-processing step, it runs on the CPU
        gs = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
        gs.setBaseImage(img)
        gs.switchToSelectiveSearchFast()
        rects = gs.process()
        rects[:, 2] += rects[:, 0]
        rects[:, 3] += rects[:, 1]
        print("Suggested number of proposals: %d" % len(rects))
    
        # Proposals generated by selective search are fed to the model one at a time to compute features.
        # [See line 151 in https://github.com/object-detection-algorithm/R-CNN/blob/master/py/car_detector.py]
        for idx, rect in enumerate(rects):
    
            xmin, ymin, xmax, ymax = rect
            rect_img = img[ymin:ymax, xmin:xmax]
    
            rect_transform = transform(rect_img)
    
            inputs = [rect_transform.unsqueeze(0)]
    
            # Build Module Name
            module_name = build_module_name(
                framework=Framework.PYTORCH,
                model="rcnn",
                suffix=f"rect_{idx}",
                source=Source.TORCHVISION,
                task=Task.OBJECT_DETECTION,
            )
    
            # Forge compile framework model
>           compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/rcnn/test_rcnn.py:93: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_rcnn_base_obj_det_torchvision_rect_0], graph_name='pt_rcnn_base_obj_det_torchvision_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f185458f270>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
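
Since the failure surfaces inside run_mlir_compiler on the very first proposal (rect_0), the selective-search loop is likely irrelevant to the crash. A hypothetical reduced repro, assuming the forge.compile signature shown above and a random input in place of a real proposal crop (the classifier-head swap from the test is omitted here; restore it if the failure does not reproduce):

    import torch
    import torchvision
    import forge

    # Hypothetical minimal repro for the MLIR pass-pipeline failure: compile
    # the AlexNet-based graph once, with a single random 227x227 "proposal".
    model = torchvision.models.alexnet(pretrained=True).eval()
    inputs = [torch.rand(1, 3, 227, 227)]
    compiled = forge.compile(model, sample_inputs=inputs, module_name="pt_rcnn_repro")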

Check failure on line 61 in forge/test/models/pytorch/vision/segformer/test_segformer.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_segformer.test_segformer_image_classification_pytorch[nvidia/mit-b0]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f19823d68c0>
variant = 'nvidia/mit-b0'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants_img_classification)
    def test_segformer_image_classification_pytorch(record_forge_property, variant):
        if variant != "nvidia/mit-b0":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="segformer",
            variant=variant,
            task=Task.IMAGE_CLASSIFICATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Set model configurations
        config = SegformerConfig.from_pretrained(variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config = SegformerConfig(**config_dict)
    
        # Load the model from HuggingFace
        framework_model = SegformerForImageClassification.from_pretrained(variant, config=config)
        framework_model.eval()
    
        # Load the sample image
        pixel_values = get_sample_data(variant)
        inputs = [pixel_values]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/segformer/test_segformer.py:61: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_segformer_nvidia_mit_b0_img_cls_hf], graph_name='pt_segformer_nvidia_mit_b0_img_cls_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f198e173bf0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
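
The config round-trip in the test above (to_dict(), then rebuilding SegformerConfig) exists only to force return_dict=False, so the model returns plain tuples instead of ModelOutput objects, which is what graph tracers and compilers generally want. A shorter equivalent, assuming standard transformers config behavior:

    from transformers import SegformerConfig, SegformerForImageClassification

    # Setting the attribute directly has the same effect as the
    # to_dict()/rebuild round-trip: forward() returns a plain tuple.
    config = SegformerConfig.from_pretrained("nvidia/mit-b0")
    config.return_dict = False
    model = SegformerForImageClassification.from_pretrained("nvidia/mit-b0", config=config)
    model.eval()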

Check failure on line 52 in forge/test/models/onnx/vision/yolo/test_yolo_v5.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v5.test_yolov5_320x320[yolov5s]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f1b63b09870>
size = 's'

    @pytest.mark.nightly
    @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size])
    def test_yolov5_320x320(record_forge_property, size):
        if size != "s":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v5",
            variant="yolov5" + size,
            task="imgcls",
            source="torchhub",
            suffix="320x320",
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_yoloV5I320_imgcls_torchhub_pytorch(
            "ultralytics/yolov5",
            size=size,
        )
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v5.py:52: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v5_yolov5s_imgcls_torchhub_320x320], graph_name='pt_yolo_v5_yolov5s_imgcls_torc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f198f657970>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError

Check failure on line 68 in forge/test/models/pytorch/vision/yolo/test_yolo_v6.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v6.test_yolo_v6_pytorch[yolov6n]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f19815a4af0>
variant = 'yolov6n'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_yolo_v6_pytorch(record_forge_property, variant):
        if variant != "yolov6n":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v6",
            variant=variant,
            source=Source.TORCH_HUB,
            task=Task.OBJECT_DETECTION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # STEP 2: Prepare the model
        url = f"https://github.com/meituan/YOLOv6/releases/download/0.3.0/{variant}.pt"
        weights = f"{variant}.pt"
    
        try:
            response = requests.get(url)
            with open(weights, "wb") as file:
                file.write(response.content)
            print(f"Downloaded {url} to {weights}")
        except Exception as e:
            print(f"Error downloading {url}: {e}")
    
        model = YOLOV6(weights)
        framework_model = model.model
        framework_model.eval()
    
        # STEP 3: Prepare the input
        url = "http://images.cocodataset.org/val2017/000000397133.jpg"
        stride = 32
        input_size = 640
        img_size = check_img_size(input_size, s=stride)
        img, img_src = process_image(url, img_size, stride, half=False)
        input_batch = img.unsqueeze(0)
    
        inputs = [input_batch]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v6.py:68: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v6_yolov6n_obj_det_torchhub], graph_name='pt_yolo_v6_yolov6n_obj_det_torchhub',...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f199223eab0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

/opt/ttforge-toolchain/venv/lib/python3.10/site-packages/forge/compile.py:976: RuntimeError
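
Unrelated to the lowering failure itself, the weight-download step in the YOLOv6 test above prints and swallows any download error, then proceeds to load the possibly missing or truncated file. A fail-fast variant is a small change, sketched here with standard requests calls (URL and filename copied from the test):

    import requests

    # raise_for_status() turns HTTP errors into exceptions instead of
    # silently writing an error page to disk; timeout avoids hanging CI.
    url = "https://github.com/meituan/YOLOv6/releases/download/0.3.0/yolov6n.pt"
    response = requests.get(url, timeout=60)
    response.raise_for_status()
    with open("yolov6n.pt", "wb") as file:
        file.write(response.content)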