[internal] Collect workflow data #2470
82 tests run, 7 passed, 63 skipped, 12 failed.
Annotations
Check failure on line 46 in forge/test/models/pytorch/audio/stereo/test_stereo.py
github-actions / TT-Forge-FE Tests
test_stereo.test_stereo[facebook/musicgen-small]
RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
info:
Trying to access element outside of dimensions: 3
backtrace:
--- tt::graphlib::Shape::operator[](int)
--- tt::passes::commute_through_concat(tt::graphlib::Graph*, tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Node*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, bool*, std::pair<int, int>*, tt::graphlib::OpType*, bool)
--- tt::passes::can_commute_past_op(tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Graph*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, tt::graphlib::Node*)
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x18fc4f) [0x7f45ee995c4f]
--- tt::passes::erase_inverse_ops(tt::graphlib::Graph*)
--- tt::run_optimization_graph_passes(tt::graphlib::Graph*)
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xfda39) [0x7f45ee903a39]
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xe0cf5) [0x7f45ee8e6cf5]
--- [89 CPython interpreter frames (/opt/ttforge-toolchain/venv/bin/python: _PyEval_EvalFrameDefault, _PyFunction_Vectorcall, PyObject_Call) omitted]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f45a64248b0>
variant = 'facebook/musicgen-small'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_stereo(record_forge_property, variant):
        if variant != "facebook/musicgen-small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        framework_model, processor = load_model(variant)
        input_ids, attn_mask, decoder_input_ids = load_inputs(framework_model, processor)
        inputs = [input_ids, attn_mask, decoder_input_ids]
        # Issue: https://github.com/tenstorrent/tt-forge-fe/issues/615
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/audio/stereo/test_stereo.py:46:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
context = CompileContext(modules=[Module pt_stereo_facebook_musicgen_small_music_generation_hf], graph_name='pt_stereo_facebook_...in_recompile=False, recompile_count=0, target_cycles_offset=0, forge_module=None, compiled_binary=None, attach_to=None)
    def run_optimization_pass(context: CompileContext) -> CompileDepth:
        """
        Runs optimization passes.
        Parameters
        ----------
        context: CompileContext
            Compile context
        Returns
        -------
        CompileDepth - next compile stage
        """
        compiler_cfg = context.compiler_cfg
        graph_name = context.graph_name
        graph, intermediate_tensors = context.graph, context.intermediate_tensors
>       run_optimization_graph_passes(graph)
E RuntimeError: TT_ASSERT @ /__w/tt-forge-fe/tt-forge-fe/forge/csrc/graph_lib/shape.cpp:135: (i >= 0) && (i < (int)dims_.size())
E info:
E Trying to access element outside of dimensions: 3
E backtrace:
E --- tt::graphlib::Shape::operator[](int)
E --- tt::passes::commute_through_concat(tt::graphlib::Graph*, tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Node*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, bool*, std::pair<int, int>*, tt::graphlib::OpType*, bool)
E --- tt::passes::can_commute_past_op(tt::graphlib::OpNode*, tt::graphlib::OpNode*, tt::graphlib::Graph*, tt::graphlib::Shape*, tt::graphlib::Shape*, bool, tt::graphlib::Node*)
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x18fc4f) [0x7f45ee995c4f]
E --- tt::passes::erase_inverse_ops(tt::graphlib::Graph*)
E --- tt::run_optimization_graph_passes(tt::graphlib::Graph*)
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xfda39) [0x7f45ee903a39]
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xe0cf5) [0x7f45ee8e6cf5]
E --- [89 CPython interpreter frames (/opt/ttforge-toolchain/venv/bin/python: _PyEval_EvalFrameDefault, _PyFunction_Vectorcall, PyObject_Call) omitted]
forge/forge/compile.py:829: RuntimeError
Check failure on line 64 in forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py
github-actions / TT-Forge-FE Tests
test_stable_diffusion_xl.test_stable_diffusion_generation[stable-diffusion-xl-base-1.0]
NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f4591283e20>
variant = 'stable-diffusion-xl-base-1.0'
    @pytest.mark.nightly
    @pytest.mark.skip_model_analysis
    @pytest.mark.parametrize("variant", ["stable-diffusion-xl-base-1.0"])
    def test_stable_diffusion_generation(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="stereo",
            variant=variant,
            task=Task.MUSIC_GENERATION,
            source=Source.HUGGINGFACE,
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        # Load the pipeline and set it to use the CPU
        pipe = DiffusionPipeline.from_pretrained(f"stabilityai/{variant}", torch_dtype=torch.float32)  # Use float32 for CPU
        pipe.to("cpu")  # Move the model to CPU
        # Wrap the pipeline in the wrapper
        framework_model = StableDiffusionXLWrapper(pipe)
        # Tokenize the prompt to a tensor
        tokenizer = pipe.tokenizer
        prompt = "An astronaut riding a green horse"
        input_tensor = tokenizer(prompt, return_tensors="pt").input_ids
        inputs = [input_tensor]
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/multimodal/stable_diffusion/test_stable_diffusion_xl.py:64:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
forge/forge/compile.py:678: in generate_initial_graph
module, module_inputs = convert_to_forge_module(
forge/forge/compile.py:1021: in convert_to_forge_module
forge_module, dev_types, module_inputs = generate_forge_module(
forge/forge/tvm_to_python.py:2074: in generate_forge_module
framework_outputs = framework_mod.cpu_eval_forward(*pytorch_inputs)
forge/forge/module.py:142: in cpu_eval_forward
outputs = flatten_structured_output([outputs])
forge/forge/tvm_utils.py:112: in flatten_structured_output
sub_output = flatten_structured_output(
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
outputs = [<PIL.Image.Image image mode=RGB size=1024x1024 at 0x7F4590FB8490>]
    def flatten_structured_output(outputs):
        from forge.tensor import Tensor
        new_outputs = []
        for i in range(len(outputs)):
            out = outputs[i]
            if isinstance(out, (list, tuple)):
                sub_output = flatten_structured_output(
                    out,
                )
                new_outputs += sub_output
            elif isinstance(out, dict):
                sub_output = []
                for k, v in out.items():
                    sub_output.append(v)
                sub_output = flatten_structured_output(
                    sub_output,
                )
                new_outputs += sub_output
            elif isinstance(out, (torch.Tensor, tf.Tensor, Tensor, np.ndarray)):
                new_outputs.append(out)
            elif out is None:
                continue
            else:
>               raise NotImplementedError(f"Unknown output type: {type(out)}")
E               NotImplementedError: Unknown output type: <class 'PIL.Image.Image'>
forge/forge/tvm_utils.py:133: NotImplementedError
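The failure above is a front-end limitation rather than a model bug: flatten_structured_output only accepts tensor-like outputs (torch.Tensor, tf.Tensor, forge Tensor, np.ndarray), and the wrapped pipeline returns a PIL.Image.Image. A minimal sketch of one possible test-side workaround, converting the image to a tensor before Forge sees the output; pil_to_tensor and TensorOutputWrapper are illustrative names, not the repository's actual StableDiffusionXLWrapper:

    import numpy as np
    import torch
    from PIL import Image

    def pil_to_tensor(image: Image.Image) -> torch.Tensor:
        # Convert an RGB PIL image to a float32 CHW tensor in [0, 1].
        array = np.asarray(image, dtype=np.float32) / 255.0   # HWC uint8 -> float32
        return torch.from_numpy(array).permute(2, 0, 1)       # HWC -> CHW

    class TensorOutputWrapper(torch.nn.Module):
        # Hypothetical wrapper: forwards to the existing pipeline wrapper and converts
        # any PIL image it returns into a tensor, so flatten_structured_output only
        # ever sees supported types.
        def __init__(self, inner):
            super().__init__()
            self.inner = inner

        def forward(self, *args):
            out = self.inner(*args)
            return pil_to_tensor(out) if isinstance(out, Image.Image) else out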
Check failure on line 74 in forge/test/models/pytorch/text/codegen/test_codegen.py
github-actions / TT-Forge-FE Tests
test_codegen.test_codegen[Salesforce/codegen-350M-mono]
AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f46e2d42680>
variant = 'Salesforce/codegen-350M-mono'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_codegen(record_forge_property, variant):
        if variant != "Salesforce/codegen-350M-mono":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="codegen", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        # Load model (with tokenizer)
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        framework_model = download_model(CodeGenForCausalLM.from_pretrained, variant, use_cache=False, return_dict=False)
        # Input prompt
        input_prompt = "def hello_world():"
        # Tokenize input
        inputs = tokenizer(
            input_prompt,
            return_tensors="pt",
            max_length=256,
            pad_to_max_length=True,
            truncation=True,
        )
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
        # Wrapper to get around attention mask
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
            def forward(self, input_ids, attention_mask):
                return self.model(input_ids, None, attention_mask)
        framework_model = Wrapper(framework_model)
        # Sanity run
        input_ids = input_ids.to(torch.int32)
        attn_mask = attn_mask.to(torch.float32)
        inputs = [input_ids, attn_mask]
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/text/codegen/test_codegen.py:74:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
forge/forge/compile.py:678: in generate_initial_graph
module, module_inputs = convert_to_forge_module(
forge/forge/compile.py:1021: in convert_to_forge_module
forge_module, dev_types, module_inputs = generate_forge_module(
forge/forge/tvm_to_python.py:2140: in generate_forge_module
verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
frame_outputs = [tensor([[[ 4.10871, 3.43310, 2.84573, ..., -4.54513, -4.55066, -4.54938],
[13.21362, 13.68193, 9.95741, ...75, -9.02421],
[ 7.29722, 10.71369, 7.41228, ..., -6.52333, -6.52313, -6.52258]]], grad_fn=<ViewBackward0>)]
forge_outputs = (Forge Tensor: tensor([[[ 4.10957, 3.43417, 2.84655, ..., -4.54479, -4.55031, -4.54904],
[13.21211, 13.679... [ 8.90092, 12.67207, 12.04863, ..., -8.01162, -8.00963, -8.01005]]], grad_fn=<AddBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)
    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen
forge/forge/tvm_to_python.py:1987: AssertionError
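The two data-mismatch failures in this run (codegen here, gpt-neo below) only report that output 0 differed. A small standalone sketch of the kind of check that helps triage such mismatches locally, using plain PyTorch rather than the repository's compare_tensor_to_golden; the tolerance values are arbitrary assumptions:

    import torch

    def report_mismatch(golden: torch.Tensor, computed: torch.Tensor,
                        rtol: float = 1e-2, atol: float = 1e-2) -> None:
        # Summarize how far apart two outputs are: allclose verdict, max absolute
        # error, and a Pearson correlation coefficient over the flattened tensors.
        g = golden.detach().flatten().to(torch.float32)
        c = computed.detach().flatten().to(torch.float32)
        max_abs_err = (g - c).abs().max().item()
        pcc = torch.corrcoef(torch.stack([g, c]))[0, 1].item()
        print(f"allclose(rtol={rtol}, atol={atol}): {torch.allclose(g, c, rtol=rtol, atol=atol)}")
        print(f"max abs error: {max_abs_err:.5f}")
        print(f"PCC: {pcc:.5f}")

Calling, for example, report_mismatch(frame_outputs[0], forge_outputs[0].value()) on the tensors shown above would indicate whether the divergence is small numerical drift or a genuine codegen error.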
Check failure on line 76 in forge/test/models/pytorch/text/gptneo/test_gptneo.py
github-actions / TT-Forge-FE Tests
test_gptneo.test_gptneo_causal_lm[EleutherAI/gpt-neo-125M]
AssertionError: Data mismatch on output 0 between framework and Forge codegen
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f4630ff43a0>
variant = 'EleutherAI/gpt-neo-125M'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_gptneo_causal_lm(record_forge_property, variant):
        if variant != "EleutherAI/gpt-neo-125M":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="gptneo", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        # Set random seed for repeatability
        torch.manual_seed(42)
        # Load tokenizer and model
        # Variants: # EleutherAI/gpt-neo-125M, EleutherAI/gpt-neo-1.3B,
        # EleutherAI/gpt-neo-2.7B
        config = download_model(GPTNeoConfig.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = GPTNeoConfig(**config_dict)
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
        model = download_model(GPTNeoForCausalLM.from_pretrained, variant, config=config)
        # Sample input text
        prompt = "My name is Bert, and I am"
        inputs = tokenizer(prompt, return_tensors="pt", max_length=256, pad_to_max_length=True, truncation=True)
        # Wrapper to get around attention mask
        class Wrapper(torch.nn.Module):
            def __init__(self, model):
                super().__init__()
                self.model = model
            def forward(self, input_ids, attention_mask):
                return self.model(input_ids, None, attention_mask)
        framework_model = Wrapper(model)
        inputs = [inputs["input_ids"], inputs["attention_mask"]]
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/text/gptneo/test_gptneo.py:76:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
forge/forge/compile.py:678: in generate_initial_graph
module, module_inputs = convert_to_forge_module(
forge/forge/compile.py:1021: in convert_to_forge_module
forge_module, dev_types, module_inputs = generate_forge_module(
forge/forge/tvm_to_python.py:2140: in generate_forge_module
verify_framework_vs_forge_codegen(framework_outputs, forge_outputs, verify_cfg=verify_cfg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
frame_outputs = [tensor([[[ -2.93149, -3.04098, -5.19186, ..., -11.81308, -8.60934, -6.07345],
[ -6.99371, -5.94796, -1...],
[-38.90412, -38.28091, -42.97676, ..., -69.93178, -49.82716, -37.80742]]], grad_fn=<UnsafeViewBackward0>)]
forge_outputs = (Forge Tensor: tensor([[[ -2.93151, -3.04100, -5.19192, ..., -11.81311, -8.60940, -6.07350],
[ -6.99369,...2, -5.60490, -4.23273, ..., -16.88002, -15.43275, -9.50537]]], grad_fn=<UnsafeViewBackward0>), DataFormat.Float32,)
verify_cfg = DepricatedVerifyConfig(graph_name='graph', enabled=False, intermediates=False, rtol={torch.float32: None, torch.float1...ent_checking=True, enable_parameter_gradient_checking=True, _input_gradient_queue=None, _parameter_gradient_queue=None)
    def verify_framework_vs_forge_codegen(frame_outputs, forge_outputs, verify_cfg):
        from forge.verify.compare import compare_tensor_to_golden
        test_pass = True
        for i, (golden, output) in enumerate(zip(frame_outputs, forge_outputs)):
            test_pass &= compare_tensor_to_golden(
                f"Framework vs. Forge codegen output {i}", golden, output.value(), is_forge=False, verify_cfg=verify_cfg
            )
>           assert test_pass, f"Data mismatch on output {i} between framework and Forge codegen"
E           AssertionError: Data mismatch on output 0 between framework and Forge codegen
forge/forge/tvm_to_python.py:1987: AssertionError
Check failure on line 21 in forge/test/models/pytorch/text/mistral/test_mistral.py
github-actions / TT-Forge-FE Tests
test_mistral.test_mistral[mistralai/Mistral-7B-v0.1]
NameError: name 'Task' is not defined
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f46da5afd00>
variant = 'mistralai/Mistral-7B-v0.1'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_mistral(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
>           framework=Framework.PYTORCH, model="mistral", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
E       NameError: name 'Task' is not defined
forge/test/models/pytorch/text/mistral/test_mistral.py:21: NameError
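This one is a plain missing import in the test module rather than a compiler failure: Task is referenced but never imported. The other text-model tests in this run pull Task from the same helper module that provides build_module_name, Framework, and Source, so the likely fix is an import along these lines (the exact module path is an assumption, not confirmed by this log):

    # forge/test/models/pytorch/text/mistral/test_mistral.py
    from test.models.utils import Framework, Source, Task, build_module_name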
Check failure on line 59 in forge/test/models/pytorch/text/t5/test_t5.py
github-actions / TT-Forge-FE Tests
test_t5.test_t5_generation[t5-base]
NameError: name 'AutoTokenizer' is not defined
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f46da5ad6c0>
variant = 't5-base'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_t5_generation(record_forge_property, variant):
        if variant not in {"t5-small", "google/flan-t5-small", "t5-base", "t5-large"}:
            pytest.skip(f"Skipping {variant} due to the current CI/CD pipeline limitations")
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="t5", variant=variant, task=Task.TEXT_GENERATION, source=Source.HUGGINGFACE
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        # Load tokenizer and model from HuggingFace
        # Variants: t5-small, t5-base, t5-large
        config = download_model(T5Config.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = T5Config(**config_dict)
        model = download_model(T5ForConditionalGeneration.from_pretrained, variant, config=config)
>       tokenizer = AutoTokenizer.from_pretrained(variant)
E       NameError: name 'AutoTokenizer' is not defined
forge/test/models/pytorch/text/t5/test_t5.py:59: NameError
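Same class of failure: AutoTokenizer comes from Hugging Face transformers and simply is not imported in this test module. A minimal, self-contained fix for the failing line:

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-base")  # the failing variant in this run

Other tests in this run wrap the call as download_model(AutoTokenizer.from_pretrained, variant), so routing it through that helper would also match the surrounding style.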
Check failure on line 57 in forge/test/models/pytorch/text/xglm/test_xglm.py
github-actions / TT-Forge-FE Tests
test_xglm.test_xglm_causal_lm[facebook/xglm-564M]
RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:241: tt::exception
info:
1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
backtrace:
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/libtt_metal.so(+0x132ec5) [0x7f45de25bec5]
--- tt::tt_metal::v0::Kernel::validate_runtime_args_size(unsigned long, unsigned long, tt::umd::xy_pair const&)
--- tt::tt_metal::v0::Kernel::set_runtime_args(tt::umd::xy_pair const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/libtt_metal.so(+0x2d21a3) [0x7f45de3fb1a3]
--- tt::tt_metal::v0::SetRuntimeArgs(tt::tt_metal::v0::Program const&, unsigned int, std::variant<tt::umd::xy_pair, CoreRange, CoreRangeSet> const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
--- ttnn::operations::data_movement::detail::concat_multi_core(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, unsigned int, tt::tt_metal::Tensor const&)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x60b8a3) [0x7f45deae28a3]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2596aea) [0x7f45e0a6daea]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2596ef4) [0x7f45e0a6def4]
--- void ttnn::device_operation::detail::launch_on_worker_thread<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, tt::tt_metal::v0::IDevice*>(unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, tt::tt_metal::v0::IDevice*&)
--- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::launch_on_single_device<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2595de9) [0x7f45e0a6cde9]
--- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259020f) [0x7f45e0a6720f]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(_ZN2tt8tt_metal9operation19run_with_autoformatEONS1_15DeviceOperationISt6vectorINS0_6TensorESaIS4_EEEERKS6_RKS3_IN4ttnn10operations12experimental11auto_format12FormatParamsESaISF_EERKS3_INS0_6LayoutESaISK_EERKS3_ISt8optionalIKS4_ESaISR_EERKS3_ISP_ISF_ESaISW_EERKS3_ISP_IS4_ESaIS11_EEh+0x503) [0x7f45e0a64113]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x60aa99) [0x7f45deae1a99]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259c8a2) [0x7f45e0a738a2]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259d08c) [0x7f45e0a7408c]
--- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
--- ttnn::operations::data_movement::concat_impl(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, long, unsigned int, tt::tt_metal::MemoryConfig const&)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x604e6b) [0x7f45deadbe6b]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x5e4fe5) [0x7f45deabbfe5]
--- ttnn::operations::data_movement::ConcatOperation::invoke(unsigned char, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
--- ttnn::operations::data_movement::ConcatOperation::invoke(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x57837c) [0x7f45dea4f37c]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x57828b) [0x7f45dea4f28b]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x5781fc) [0x7f45dea4f1fc]
--- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x577fb4) [0x7f45dea4efb4]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x577a2d) [0x7f45dea4ea2d]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x56e1f0) [0x7f45dea451f0]
--- ttnn::operations::data_movement::ExecuteRepeatInterleave::invoke(tt::tt_metal::Tensor const&, unsigned int, int, std::optional<tt::tt_metal::MemoryConfig> const&)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f9b4) [0x7f45e249c9b4]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f70a) [0x7f45e249c70a]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f65c) [0x7f45e249c65c]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259c8a2) [0x7f45e0a738a2]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259d08c) [0x7f45e0a7408c]
--- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f323) [0x7f45e249c323]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78ed9d) [0x7f45e249bd9d]
--- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78e897) [0x7f45e249b897]
--- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::RepeatInterleaveOp const*, tt::runtime::ttnn::ProgramContext&)
--- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
--- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
--- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
--- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x3174f0) [0x7f45eeb1d4f0]
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x31743e) [0x7f45eeb1d43e]
--- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xe0cf5) [0x7f45ee8e6cf5]
--- [48 CPython interpreter frames (/opt/ttforge-toolchain/venv/bin/python: _PyEval_EvalFrameDefault, _PyFunction_Vectorcall, PyObject_Call) omitted]
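The backtrace shows the failure coming from ttnn's repeat_interleave lowering (ExecuteRepeatInterleave -> concat_impl -> concat_multi_core), where the concat kernel receives more runtime arguments than the 256-argument limit. A plausible reading is that repeat_interleave is decomposed into a concat over one slice per repeated element, so the argument count scales with the size of the repeated dimension; a short PyTorch sketch of that equivalence (shapes are illustrative, not taken from the model):

    import torch

    # repeat_interleave along a dim equals concatenating one expanded slice per
    # element of that dim; a lowering that emits a single concat over all slices
    # passes per-input runtime args to the kernel, which grows with the dim size.
    x = torch.randn(1, 4, 32, 64)
    repeats = 16
    a = torch.repeat_interleave(x, repeats, dim=1)
    b = torch.cat([x[:, i : i + 1].expand(-1, repeats, -1, -1) for i in range(x.size(1))], dim=1)
    assert torch.equal(a, b)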
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f46da5af0a0>
variant = 'facebook/xglm-564M'
    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_xglm_causal_lm(record_forge_property, variant):
        if variant != "facebook/xglm-564M":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="xglm", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
        # Record Forge Property
        record_forge_property("model_name", module_name)
        config = XGLMConfig.from_pretrained(variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = XGLMConfig(**config_dict)
        framework_model = download_model(XGLMForCausalLM.from_pretrained, variant, config=config)
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
        # Input sample
        prefix_text = "My name is Thomas and my main"
        input_tokens = tokenizer(
            prefix_text,
            max_length=256,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
        inputs = [input_tokens["input_ids"], input_tokens["attention_mask"]]
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
        # Model Verification
>       verify(inputs, framework_model, compiled_model)
forge/test/models/pytorch/text/xglm/test_xglm.py:57:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/verify/verify.py:302: in verify
co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.compiled_graph_state.CompiledModel object at 0x7f459146bfa0>
inputs = (tensor([[ 2, 1622, 4432, 67, 10128, 53, 363, 3270, 2, 2, 2, 2, 2, 2, 2, ...0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]))
inputs_and_parameters = [tensor([[ 2, 1622, 4432, 67, 10128, 53, 363, 3270, 2, 2, 2, 2, 2, 2, 2, ... [0.00098],
[0.00098],
[0.00098],
[0.00098],
[0.00098],
[0.00098]]]), ...]
    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/kernels/kernel.cpp:241: tt::exception
E info:
E 1283 unique+common runtime args targeting kernel reader_concat_stick_layout_interleaved_start_id on (x=0,y=0) are too large. Max allowable is 256
E backtrace:
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/libtt_metal.so(+0x132ec5) [0x7f45de25bec5]
E --- tt::tt_metal::v0::Kernel::validate_runtime_args_size(unsigned long, unsigned long, tt::umd::xy_pair const&)
E --- tt::tt_metal::v0::Kernel::set_runtime_args(tt::umd::xy_pair const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/libtt_metal.so(+0x2d21a3) [0x7f45de3fb1a3]
E --- tt::tt_metal::v0::SetRuntimeArgs(tt::tt_metal::v0::Program const&, unsigned int, std::variant<tt::umd::xy_pair, CoreRange, CoreRangeSet> const&, tt::stl::Span<unsigned int const, 18446744073709551615ul>)
E --- ttnn::operations::data_movement::detail::concat_multi_core(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, unsigned int, tt::tt_metal::Tensor const&)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x60b8a3) [0x7f45deae28a3]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2596aea) [0x7f45e0a6daea]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2596ef4) [0x7f45e0a6def4]
E --- void ttnn::device_operation::detail::launch_on_worker_thread<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, tt::tt_metal::v0::IDevice*>(unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, tt::tt_metal::v0::IDevice*&)
E --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::launch_on_single_device<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2595de9) [0x7f45e0a6cde9]
E --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259020f) [0x7f45e0a6720f]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(_ZN2tt8tt_metal9operation19run_with_autoformatEONS1_15DeviceOperationISt6vectorINS0_6TensorESaIS4_EEEERKS6_RKS3_IN4ttnn10operations12experimental11auto_format12FormatParamsESaISF_EERKS3_INS0_6LayoutESaISK_EERKS3_ISt8optionalIKS4_ESaISR_EERKS3_ISP_ISF_ESaISW_EERKS3_ISP_IS4_ESaIS11_EEh+0x503) [0x7f45e0a64113]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x60aa99) [0x7f45deae1a99]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259c8a2) [0x7f45e0a738a2]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259d08c) [0x7f45e0a7408c]
E --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E --- ttnn::operations::data_movement::concat_impl(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, long, unsigned int, tt::tt_metal::MemoryConfig const&)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x604e6b) [0x7f45deadbe6b]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x5e4fe5) [0x7f45deabbfe5]
E --- ttnn::operations::data_movement::ConcatOperation::invoke(unsigned char, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
E --- ttnn::operations::data_movement::ConcatOperation::invoke(std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, int, std::optional<tt::tt_metal::MemoryConfig> const&, std::optional<tt::tt_metal::Tensor> const&, unsigned int)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x57837c) [0x7f45dea4f37c]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x57828b) [0x7f45dea4f28b]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x5781fc) [0x7f45dea4f1fc]
E --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x577fb4) [0x7f45dea4efb4]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x577a2d) [0x7f45dea4ea2d]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x56e1f0) [0x7f45dea451f0]
E --- ttnn::operations::data_movement::ExecuteRepeatInterleave::invoke(tt::tt_metal::Tensor const&, unsigned int, int, std::optional<tt::tt_metal::MemoryConfig> const&)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f9b4) [0x7f45e249c9b4]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f70a) [0x7f45e249c70a]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f65c) [0x7f45e249c65c]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259c8a2) [0x7f45e0a738a2]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259d08c) [0x7f45e0a7408c]
E --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78f323) [0x7f45e249c323]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78ed9d) [0x7f45e249bd9d]
E --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x78e897) [0x7f45e249b897]
E --- tt::runtime::ttnn::operations::data_movement::run(tt::target::ttnn::RepeatInterleaveOp const*, tt::runtime::ttnn::ProgramContext&)
E --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
E --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x3174f0) [0x7f45eeb1d4f0]
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x31743e) [0x7f45eeb1d43e]
E --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xe0cf5) [0x7f45ee8e6cf5]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x18ab32) [0x561279e45b32]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x561279e3c39b]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x59c7) [0x561279e35a97]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0xc4) [0x561279e3b574]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x561279e5062c]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x561279f58464]
E --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x561279e5410b]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x561279e32c30]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x561279e30790]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0x122) [0x561279e54172]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x561279e32c30]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x561279e32c30]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x561279e319ab]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x561279e35702]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x561279e534d1]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x561279e35702]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x561279e3b61d]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x561279e5062c]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x561279f58464]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_MakeTpCall+0x25b) [0x561279e3c39b]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x68ce) [0x561279e3699e]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x8af) [0x561279e3097f]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x561279e32c30]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x561279e319ab]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x561279e35702]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x561279e534d1]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x5632) [0x561279e35702]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_FastCallDictTstate+0x16d) [0x561279e3b61d]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyObject_Call_Prepend+0x5c) [0x561279e5062c]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x29d464) [0x561279f58464]
E --- /opt/ttforge-toolchain/venv/bin/python(PyObject_Call+0xbb) [0x561279e5410b]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x2b60) [0x561279e32c30]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x6c0) [0x561279e30790]
E --- /opt/ttforge-toolchain/venv/bin/python(+0x1984d1) [0x561279e534d1]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyEval_EvalFrameDefault+0x18db) [0x561279e319ab]
E --- /opt/ttforge-toolchain/venv/bin/python(_PyFunction_Vectorcall+0x7c) [0x561279e4638c]
forge/forge/compiled_graph_state.py:253: RuntimeError
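The op chain in the backtrace above is RepeatInterleaveOp -> ExecuteRepeatInterleave -> concat_impl -> concat_multi_core, and concat_multi_core appears to hand the reader kernel runtime arguments per concat input; with enough inputs the argument count (1283 here) exceeds tt-metal's 256-per-kernel limit. A minimal sketch of why repeat_interleave can fan out into that many concat inputs follows; the shapes and repeat count are illustrative assumptions, not values taken from the failing graph.
# Sketch only: shows how repeat_interleave decomposes into a multi-input
# concat whose input count can exceed a per-kernel runtime-arg budget.
import torch

def repeat_interleave_as_concat(x: torch.Tensor, repeats: int, dim: int) -> torch.Tensor:
    # repeat_interleave(x, repeats, dim) is equivalent to concatenating
    # x.size(dim) * repeats single-element slices along dim.
    slices = [x.narrow(dim, i, 1) for i in range(x.size(dim)) for _ in range(repeats)]
    print(f"concat receives {len(slices)} inputs")  # each input contributes runtime args to the reader kernel
    return torch.cat(slices, dim=dim)

x = torch.randn(1, 8, 64)                                # hypothetical activation, not the real shape
y = repeat_interleave_as_concat(x, repeats=40, dim=1)    # 8 * 40 = 320 concat inputs
assert torch.equal(y, torch.repeat_interleave(x, 40, dim=1))
If that reading is right, the fix likely belongs in how RepeatInterleaveOp is decomposed (or in chunking the concat's runtime args), not in the test itself.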
Check failure on line 59 in forge/test/models/pytorch/vision/alexnet/test_alexnet.py
github-actions / TT-Forge-FE Tests
test_alexnet.test_alexnet_torchhub
RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f45102e89d0>
@pytest.mark.nightly
def test_alexnet_torchhub(record_forge_property):
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="alexnet",
variant="alexnet",
source=Source.TORCH_HUB,
task=Task.IMAGE_CLASSIFICATION,
)
# Record Forge Property
record_forge_property("model_name", module_name)
# Load model
framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "alexnet", pretrained=True)
framework_model.eval()
# Load and pre-process image
try:
torch.hub.download_url_to_file("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
input_image = Image.open("dog.jpg")
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
img_tensor = preprocess(input_image).unsqueeze(0)
except:
logger.warning(
"Failed to download the image file, replacing input with random tensor. Please check if the URL is up to date"
)
img_tensor = torch.rand(1, 3, 224, 224)
inputs = [img_tensor]
# Forge compile framework model
> compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/vision/alexnet/test_alexnet.py:59:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
context = CompileContext(modules=[Module pt_alexnet_alexnet_img_cls_torchhub], graph_name='pt_alexnet_alexnet_img_cls_torchhub',...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f46312ff0b0>, compiled_binary=None, attach_to=None)
def run_mlir_compiler(context: CompileContext) -> CompileDepth:
assert context.forge_module is not None
> context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E RuntimeError: Failed to run MLIR compiler pass pipeline.
forge/forge/compile.py:976: RuntimeError
Check failure on line 114 in forge/test/models/pytorch/vision/densenet/test_densenet.py
github-actions / TT-Forge-FE Tests
test_densenet.test_densenet_169_pytorch[densenet169]
RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f4630e26cb0>
variant = 'densenet169'
@pytest.mark.nightly
@pytest.mark.parametrize("variant", ["densenet169"])
def test_densenet_169_pytorch(record_forge_property, variant):
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="densenet",
variant=variant,
source=Source.TORCHVISION,
task=Task.IMAGE_CLASSIFICATION,
)
# Record Forge Property
record_forge_property("model_name", module_name)
# STEP 2: Create Forge module from PyTorch model
framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet169", pretrained=True)
# STEP 3: Run inference on Tenstorrent device
img_tensor = get_input_img()
inputs = [img_tensor]
# Forge compile framework model
compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
# Model Verification
> verify(inputs, framework_model, compiled_model)
forge/test/models/pytorch/vision/densenet/test_densenet.py:114:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/verify/verify.py:302: in verify
co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <forge.compiled_graph_state.CompiledModel object at 0x7f45911fffa0>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241, ..., -2.03228, -1.94666, -1.92953],
[-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
[-1.28157, -1.42100, -1.22928, ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241, ..., -2.03228, -1.94666, -1.92953],
[-1.99803, -1.89528, -1.91241...2.10225, 1.20796, 2.29127, 1.71784, 2.46689, 2.76570, 1.49687, 1.84175, 1.53622, 2.85604]]]], requires_grad=True), ...]
def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
"""
Run inference on the compiled model.
Parameters
----------
inputs: [Tensor, ...]
Input tensors
Returns
-------
List[Tensor]
Output tensors
"""
self.inputs = [*to_pt_tensors(inputs)]
inputs_and_parameters = [
*self.inputs,
*self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
*self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
]
assert all(
[isinstance(t, torch.Tensor) for t in inputs_and_parameters]
), "All inputs should be torch tensors by now."
if self.training() and isinstance(self.framework_module, PyTorchModule):
for name, param in self.framework_module.module.named_parameters():
if param.requires_grad:
our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
# NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
# module. This is because we want to be able to optimize the parameters both on the device
# (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
# the parameter value, the other side can see the change.
#
# This could change in the future, but for now ensure that our premise is correct.
assert param is our_tensor
logger.info(
f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
)
> all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
forge/forge/compiled_graph_state.py:253: RuntimeError
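The expected strides [150528, 50176, 224, 1] are those of a contiguous NCHW tensor of shape (1, 3, 224, 224), while [3, 1, 672, 3] is exactly what falls out of permuting an HWC image buffer to CHW and unsqueezing a batch dim without materializing a copy. A small sketch of where that stride pattern comes from and how .contiguous() restores the layout the runtime checks for; the permute-based preprocessing is an assumption, since get_input_img() is not shown in this log.
# Sketch only: reproduces the "got" strides from an HWC -> CHW permute.
import torch

hwc = torch.empty(224, 224, 3, dtype=torch.float32)  # HWC image buffer (e.g. decoded from PIL/numpy)
chw = hwc.permute(2, 0, 1).unsqueeze(0)               # NCHW view, no data copy

print(chw.shape, chw.stride())    # torch.Size([1, 3, 224, 224]) (3, 1, 672, 3)  <- the "got" strides
print(chw.contiguous().stride())  # (150528, 50176, 224, 1)                      <- the expected strides
Calling .contiguous() (or cloning with torch.contiguous_format) on the input before handing it to the compiled model would satisfy the check; whether the runtime should instead accept non-contiguous inputs is a separate question.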
Check failure on line 93 in forge/test/models/pytorch/vision/rcnn/test_rcnn.py
github-actions / TT-Forge-FE Tests
test_rcnn.test_rcnn_pytorch
RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f45a63dd360>
@pytest.mark.nightly
def test_rcnn_pytorch(record_forge_property):
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH, model="rcnn", source=Source.TORCHVISION, task=Task.OBJECT_DETECTION
)
# Record Forge Property
record_forge_property("model_name", module_name)
# Load Alexnet Model
framework_model = torchvision.models.alexnet(pretrained=True)
num_classes = 2
num_features = framework_model.classifier[6].in_features
# Create class-specific linear SVMs [Refer to Section 2 in the paper]
svm_layer = nn.Linear(num_features, num_classes)
# Replace AlexNet's ImageNet-specific 1000-way classification layer with a randomly initialized (N + 1)-way classification layer (where N is the number of object classes, plus 1 for background)
# [Refer to Section 2.3, Domain-specific fine-tuning, in the paper]
init.normal_(svm_layer.weight, mean=0, std=0.01)
init.constant_(svm_layer.bias, 0)
framework_model.classifier[6] = svm_layer
framework_model.eval()
# Cancel gradient tracking
for param in framework_model.parameters():
param.requires_grad = False
# Image
img = cv2.imread("forge/test/models/files/samples/images/car.jpg")
transform = transforms.Compose(
[
transforms.ToPILImage(),
transforms.Resize((227, 227)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# Selective search - a separate tool for generating proposals (potential regions that might contain objects) which can be fed to the actual model
# As it is a pre-processing step, it is implemented on the CPU
gs = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
gs.setBaseImage(img)
gs.switchToSelectiveSearchFast()
rects = gs.process()
rects[:, 2] += rects[:, 0]
rects[:, 3] += rects[:, 1]
print("Suggested number of proposals: %d" % len(rects))
# Proposals generated by selective search are fed to the model in a loop to compute features.
# [Refer line No.151 in https://github.com/object-detection-algorithm/R-CNN/blob/master/py/car_detector.py]
for idx, rect in enumerate(rects):
xmin, ymin, xmax, ymax = rect
rect_img = img[ymin:ymax, xmin:xmax]
rect_transform = transform(rect_img)
inputs = [rect_transform.unsqueeze(0)]
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="rcnn",
suffix=f"rect_{idx}",
source=Source.TORCHVISION,
task=Task.OBJECT_DETECTION,
)
# Forge compile framework model
> compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/vision/rcnn/test_rcnn.py:93:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
context = CompileContext(modules=[Module pt_rcnn_base_obj_det_torchvision_rect_0], graph_name='pt_rcnn_base_obj_det_torchvision_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f4630be6b30>, compiled_binary=None, attach_to=None)
def run_mlir_compiler(context: CompileContext) -> CompileDepth:
assert context.forge_module is not None
> context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E RuntimeError: Failed to run MLIR compiler pass pipeline.
forge/forge/compile.py:976: RuntimeError
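Unrelated to the MLIR pipeline error itself, the test above re-compiles the same framework model once per proposal, giving each rect its own module name. Since only the crop changes between iterations, a compile-once / run-per-proposal structure is the more usual pattern and keeps the compiler in the loop only once. A rough sketch under that assumption, reusing the names from the test above (img, rects, transform, framework_model, module_name) and the forge.compile / compiled_model call signatures visible elsewhere in this log:
# Sketch only: not a standalone script; relies on the setup in the test above.
sample = [transform(img[rects[0][1]:rects[0][3], rects[0][0]:rects[0][2]]).unsqueeze(0)]
compiled_model = forge.compile(framework_model, sample_inputs=sample, module_name=module_name)

features = []
for xmin, ymin, xmax, ymax in rects:
    rect_tensor = transform(img[ymin:ymax, xmin:xmax]).unsqueeze(0)
    features.append(compiled_model(rect_tensor))  # inference only; no per-rect recompile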
Check failure on line 61 in forge/test/models/pytorch/vision/segformer/test_segformer.py
github-actions / TT-Forge-FE Tests
test_segformer.test_segformer_image_classification_pytorch[nvidia/mit-b0]
RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f4590b3f250>
variant = 'nvidia/mit-b0'
@pytest.mark.nightly
@pytest.mark.parametrize("variant", variants_img_classification)
def test_segformer_image_classification_pytorch(record_forge_property, variant):
if variant != "nvidia/mit-b0":
pytest.skip("Skipping due to the current CI/CD pipeline limitations")
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="segformer",
variant=variant,
task=Task.IMAGE_CLASSIFICATION,
source=Source.HUGGINGFACE,
)
# Record Forge Property
record_forge_property("model_name", module_name)
# Set model configurations
config = SegformerConfig.from_pretrained(variant)
config_dict = config.to_dict()
config_dict["return_dict"] = False
config = SegformerConfig(**config_dict)
# Load the model from HuggingFace
framework_model = SegformerForImageClassification.from_pretrained(variant, config=config)
framework_model.eval()
# Load the sample image
pixel_values = get_sample_data(variant)
inputs = [pixel_values]
# Forge compile framework model
> compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/vision/segformer/test_segformer.py:61:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
context = CompileContext(modules=[Module pt_segformer_nvidia_mit_b0_img_cls_hf], graph_name='pt_segformer_nvidia_mit_b0_img_cls_...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f46312ffbb0>, compiled_binary=None, attach_to=None)
def run_mlir_compiler(context: CompileContext) -> CompileDepth:
assert context.forge_module is not None
> context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E RuntimeError: Failed to run MLIR compiler pass pipeline.
forge/forge/compile.py:976: RuntimeError
Check failure on line 134 in forge/test/models/pytorch/vision/yolo/test_yolo_v5.py
github-actions / TT-Forge-FE Tests
test_yolo_v5.test_yolov5_480x480[yolov5s]
RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f46310e8430>
size = 's'
@pytest.mark.nightly
@pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size])
def test_yolov5_480x480(record_forge_property, size):
if size != "s":
pytest.skip("Skipping due to the current CI/CD pipeline limitations")
# Build Module Name
module_name = build_module_name(
framework=Framework.PYTORCH,
model="yolo_v5",
variant="yolov5" + size,
task="imgcls",
source="torchhub",
suffix="480x480",
)
# Record Forge Property
record_forge_property("model_name", module_name)
framework_model, inputs, _ = generate_model_yoloV5I480_imgcls_torchhub_pytorch(
"ultralytics/yolov5",
size=size,
)
# Forge compile framework model
> compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
forge/test/models/pytorch/vision/yolo/test_yolo_v5.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
forge/forge/compile.py:253: in compile_main
return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
context = CompileContext(modules=[Module pt_yolo_v5_yolov5s_imgcls_torchhub_480x480], graph_name='pt_yolo_v5_yolov5s_imgcls_torc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f462b510cf0>, compiled_binary=None, attach_to=None)
def run_mlir_compiler(context: CompileContext) -> CompileDepth:
assert context.forge_module is not None
> context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
forge/forge/compile.py:976: RuntimeError