[internal] Collect workflow data #2470

GitHub Actions / TT-Forge-FE Tests failed Feb 3, 2025 in 0s

82 tests run, 5 passed, 63 skipped, 14 failed.

Annotations

Check failure on line 53 in forge/test/models/pytorch/multimodal/clip/test_clip.py

test_clip.test_clip_pytorch[openai/clip-vit-base-patch32]

RuntimeError: Generated MLIR module failed verification.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6b65d58700>
variant = 'openai/clip-vit-base-patch32'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["openai/clip-vit-base-patch32"])
    def test_clip_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="clip",
            variant=variant,
            suffix="text",
            source=Source.HUGGINGFACE,
            task=Task.TEXT_GENERATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load processor and model from HuggingFace
        model = download_model(CLIPModel.from_pretrained, variant, torchscript=True)
        processor = download_model(CLIPProcessor.from_pretrained, variant)
    
        # Load image from the COCO dataset
        url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        image = Image.open(requests.get(url, stream=True).raw)
    
        # Prepare text prompts and process text-image inputs
        text = [
            "a photo of a cat",
            "a photo of a dog",
        ]
        inputs = processor(text=text, images=image, return_tensors="pt")
    
        inputs = [inputs["input_ids"], inputs["pixel_values"], inputs["attention_mask"]]
        framework_model = CLIPTextWrapper(model)
        inputs = [inputs[0], inputs[2]]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/multimodal/clip/test_clip.py:53: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_clip_openai_clip_vit_base_patch32_text_gen_hf_text], graph_name='pt_clip_openai_clip...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6a885466f0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Generated MLIR module failed verification.

forge/forge/compile.py:976: RuntimeError
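
A minimal local-repro sketch for this failure, assuming a working tt-forge-fe development environment; the pytest node id is taken straight from the annotation above:

    # Hedged repro sketch: select the exact failing parametrized test by node id.
    import pytest
    pytest.main(["forge/test/models/pytorch/multimodal/clip/test_clip.py::test_clip_pytorch[openai/clip-vit-base-patch32]"])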

Check failure on line 70 in forge/test/models/pytorch/text/gemma/test_gemma_2b.py

test_gemma_2b.test_gemma_2b[google/gemma-2b]

RuntimeError: Input count mismatch: expected 366, got 367
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a88381360>
variant = 'google/gemma-2b'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_gemma_2b(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="gemma",
            variant=variant,
            source=Source.HUGGINGFACE,
            task=Task.TEXT_GENERATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Random seed for reproducibility
        torch.manual_seed(42)
    
        config = download_model(GemmaConfig.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = GemmaConfig(**config_dict)
        framework_model = download_model(GemmaForCausalLM.from_pretrained, variant, config=config)
    
        # Load tokenizer
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        tokenizer.pad_token = tokenizer.eos_token
    
        # Sample input
        prompt = "What is your favorite city?"
        inputs = tokenizer(prompt, return_tensors="pt")
    
        # Sanity run
        generate_ids = framework_model.generate(inputs.input_ids, max_length=30)
        generated_text = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[
            0
        ]
    
        print(f"Sanity run generated text: {generated_text}")
    
        input_ids = inputs["input_ids"]
        attn_mask = inputs["attention_mask"]
    
        inputs = [input_ids, attn_mask]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/gemma/test_gemma_2b.py:70: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f6a83d5a0b0>
inputs = (tensor([[     2,   1841,    603,    861,   7827,   3413, 235336]]), tensor([[1, 1, 1, 1, 1, 1, 1]]))
inputs_and_parameters = [tensor([[     2,   1841,    603,    861,   7827,   3413, 235336]]), tensor([[1, 1, 1, 1, 1, 1, 1]]), tensor([45.25483...7e-03, 1.06697e-03, 9.92890e-04, 9.23956e-04, 8.59808e-04, 8.00113e-04, 7.44563e-04, 6.92869e-04, 6.44765e-04]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Input count mismatch: expected 366, got 367

forge/forge/compiled_graph_state.py:253: RuntimeError
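
The mismatch is between the tensor list assembled in __call__ (user inputs + ordered constants + ordered parameters) and the input list baked into the compiled binary. A hedged diagnostic sketch, using only attributes already visible in the traceback above, run after the failing call:

    # Hedged diagnostic: count each group that __call__ concatenates into inputs_and_parameters.
    n_inputs = len(compiled_model.inputs)  # 2 user inputs here: input_ids, attention_mask
    n_consts = len(compiled_model.fwd_compiled_graph_state.get_ordered_constant_tensors())
    n_params = len(compiled_model.fwd_compiled_graph_state.get_ordered_parameter_tensors())
    # The binary expects 366 tensors but these groups sum to 367: one group carries a
    # tensor the binary no longer consumes (plausibly a constant folded during compile).
    print(n_inputs, n_consts, n_params, n_inputs + n_consts + n_params)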

Check failure on line 54 in forge/test/models/pytorch/text/qwen/test_qwen.py

test_qwen.test_qwen1_5_causal_lm[Qwen/Qwen1.5-0.5B]

RuntimeError: Input count mismatch: expected 534, got 535
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a8370a8c0>
variant = 'Qwen/Qwen1.5-0.5B'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["Qwen/Qwen1.5-0.5B"])
    def test_qwen1_5_causal_lm(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="qwen1.5", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Setup model configuration
        config = Qwen2Config.from_pretrained(variant)
        config.use_cache = False
        config.return_dict = False
    
        # Load model and tokenizer with config
        framework_model = Qwen2ForCausalLM.from_pretrained(variant, config=config)
        tokenizer = Qwen2Tokenizer.from_pretrained(variant)
        tokenizer.pad_token, tokenizer.pad_token_id = (tokenizer.eos_token, tokenizer.eos_token_id)
    
        # Disable DynamicCache
        # See: https://github.com/tenstorrent/tt-buda/issues/42
        framework_model._supports_cache_class = False
    
        # Sample input
        batch_size = 1
        prompt = ["My name is Jim Keller and"] * batch_size
    
        inputs = tokenizer(prompt)
    
        input_ids = torch.tensor(inputs["input_ids"])
        attention_mask = torch.tensor(inputs["attention_mask"])
    
        inputs = [input_ids, attention_mask]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/text/qwen/test_qwen.py:54: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f6a83771c60>
inputs = (tensor([[ 5050,   829,   374, 11387, 65503,   323]]), tensor([[1, 1, 1, 1, 1, 1]]))
inputs_and_parameters = [tensor([[ 5050,   829,   374, 11387, 65503,   323]]), tensor([[1, 1, 1, 1, 1, 1]]), tensor([1.00000e-06]), tensor([[[...., 0., 0.],
          [0., 0., 0.,  ..., 0., 1., 0.],
          [0., 0., 0.,  ..., 0., 0., 1.]]]]), tensor([-1.]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Input count mismatch: expected 534, got 535

forge/forge/compiled_graph_state.py:253: RuntimeError
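
This is the same off-by-one failure mode as the Gemma run above (535 tensors assembled against 534 expected by the binary); the counting diagnostic sketched there applies here unchanged.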

Check failure on line 48 in forge/test/models/pytorch/text/squeezebert/test_squeezebert.py

test_squeezebert.test_squeezebert_sequence_classification_pytorch[squeezebert/squeezebert-mnli]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a65fba170>
variant = 'squeezebert/squeezebert-mnli'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["squeezebert/squeezebert-mnli"])
    def test_squeezebert_sequence_classification_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="squeezebert",
            variant=variant,
            task=Task.SEQUENCE_CLASSIFICATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load SqueezeBERT tokenizer and model from HuggingFace
        tokenizer = download_model(AutoTokenizer.from_pretrained, variant)
        framework_model = download_model(AutoModelForSequenceClassification.from_pretrained, variant)
    
        # Sample input text
        text = """Hello, my dog is cute"""
    
        # Data preprocessing
        input_tokens = tokenizer.encode(
            text,
            max_length=128,
            padding="max_length",
            truncation=True,
            return_tensors="pt",
        )
    
        inputs = [input_tokens]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/text/squeezebert/test_squeezebert.py:48: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_squeezebert_squeezebert_squeezebert_mnli_seq_cls_hf], graph_name='pt_squeezebert_squ...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6a65bc7230>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

forge/forge/compile.py:976: RuntimeError
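
Note that this error differs from the CLIP failure above: "Generated MLIR module failed verification" means the emitted module was rejected by MLIR's op verifiers, whereas "Failed to run MLIR compiler pass pipeline" generally means the module verified but one of the lowering passes signaled an error partway through.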

Check failure on line 59 in forge/test/models/pytorch/text/t5/test_t5.py

test_t5.test_t5_generation[google_flan_t5_small]

NameError: name 'AutoTokenizer' is not defined
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a65fb97e0>
variant = 'google/flan-t5-small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_t5_generation(record_forge_property, variant):
        if variant not in {"t5-small", "google/flan-t5-small", "t5-base", "t5-large"}:
            pytest.skip(f"Skipping {variant} due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="t5", variant=variant, task=Task.TEXT_GENERATION, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load tokenizer and model from HuggingFace
        # Variants: t5-small, google/flan-t5-small, t5-base, t5-large
    
        config = download_model(T5Config.from_pretrained, variant)
        config_dict = config.to_dict()
        config_dict["return_dict"] = False
        config_dict["use_cache"] = False
        config = T5Config(**config_dict)
        model = download_model(T5ForConditionalGeneration.from_pretrained, variant, config=config)
>       tokenizer = AutoTokenizer.from_pretrained(variant)
E       NameError: name 'AutoTokenizer' is not defined

forge/test/models/pytorch/text/t5/test_t5.py:59: NameError
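
This one is a plain Python error rather than a compiler or runtime failure: test_t5.py calls AutoTokenizer without importing it. Assuming the tokenizer is meant to come from transformers, as the other HuggingFace classes in this test do, the fix is a one-line import:

    # Missing import at the top of forge/test/models/pytorch/text/t5/test_t5.py
    from transformers import AutoTokenizer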

Check failure on line 49 in forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py

test_nbeats.test_nbeats_with_seasonality_basis[seasionality_basis]

RuntimeError: Tensor 4 - stride mismatch: expected [24, 1], got [1, 12]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a65fbab90>
variant = 'seasionality_basis'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["seasionality_basis"])
    def test_nbeats_with_seasonality_basis(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="nbeats", variant=variant, task=Task.CAUSAL_LM, source=Source.HUGGINGFACE
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        x, x_mask = get_electricity_dataset_input()
    
        framework_model = NBeatsWithSeasonalityBasis(
            input_size=72,
            output_size=24,
            num_of_harmonics=1,
            stacks=30,
            layers=4,
            layer_size=2048,
        )
        framework_model.eval()
    
        inputs = [x, x_mask]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/timeseries/nbeats/test_nbeats.py:49: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f6a8374b880>
inputs = (tensor([[[[ 160.50679,  156.32184,  146.39499,  ...,  264.03345,  169.91641,  163.64682],
          [  79.53951,   85...[1., 1., 1.,  ..., 1., 1., 1.],
          [1., 1., 1.,  ..., 1., 1., 1.],
          [1., 1., 1.,  ..., 1., 1., 1.]]]]))
inputs_and_parameters = [tensor([[[[ 160.50679,  156.32184,  146.39499,  ...,  264.03345,  169.91641,  163.64682],
          [  79.53951,   85...0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 4 - stride mismatch: expected [24, 1], got [1, 12]

forge/forge/compiled_graph_state.py:253: RuntimeError
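
A stride of [1, 12] where [24, 1] is expected is the signature of a transposed, non-contiguous view: a contiguous (24, 12) tensor has strides (12, 1), its transpose has shape (12, 24) with strides (1, 12), and a contiguous (12, 24) tensor has the expected (24, 1). A hedged workaround sketch; where to apply it depends on where tensor 4 of inputs_and_parameters (a graph constant or parameter here) is produced:

    # Hedged sketch: materialize a transposed view into row-major memory.
    import torch
    t = torch.zeros(24, 12).transpose(0, 1)  # shape (12, 24), strides (1, 12) - mismatched
    t = t.contiguous()                        # shape (12, 24), strides (24, 1) - as expected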

Check failure on line 51 in forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py

test_autoencoder.test_conv_ae_pytorch

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a83c71b40>

    @pytest.mark.nightly
    def test_conv_ae_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="autoencoder", variant="conv", task=Task.IMAGE_ENCODING, source=Source.GITHUB
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Instantiate model
        # NOTE: The model has not been pre-trained or fine-tuned.
        # This is for demonstration purposes only.
        framework_model = ConvAE()
    
        # Define transform to normalize data
        transform = transforms.Compose(
            [
                transforms.ToTensor(),
                transforms.Normalize((0.1307,), (0.3081,)),
            ]
        )
    
        # Load sample from MNIST dataset
        dataset = load_dataset("mnist")
        sample = dataset["train"][0]["image"]
        sample_tensor = transform(sample).unsqueeze(0)
    
        inputs = [sample_tensor]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/autoencoder/test_autoencoder.py:51: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_autoencoder_conv_img_enc_github], graph_name='pt_autoencoder_conv_img_enc_github', c...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6c3aa7e3b0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

forge/forge/compile.py:976: RuntimeError

Check failure on line 84 in forge/test/models/pytorch/vision/densenet/test_densenet.py

test_densenet.test_densenet_161_pytorch[densenet161]

RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a662cd3f0>
variant = 'densenet161'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", ["densenet161"])
    def test_densenet_161_pytorch(record_forge_property, variant):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="densenet",
            variant=variant,
            source=Source.TORCHVISION,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load DenseNet-161 model from torch hub
        framework_model = download_model(torch.hub.load, "pytorch/vision:v0.10.0", "densenet161", pretrained=True)
    
        # Prepare input image
        img_tensor = get_input_img()
        inputs = [img_tensor]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/densenet/test_densenet.py:84: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f692a6951e0>
inputs = (tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241... ..., -0.95041, -1.49072, -1.38614],
          [-1.28157, -1.42100, -1.22928,  ..., -0.74126, -1.12471, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.91241,  ..., -2.03228, -1.94666, -1.92953],
          [-1.99803, -1.89528, -1.91241....22809e+00, 3.30098e+00, 2.08003e+00, 3.74907e+00, 2.18630e+00, 2.48350e+00, 3.65909e+00]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 0 - stride mismatch: expected [150528, 50176, 224, 1], got [3, 1, 672, 3]

forge/forge/compiled_graph_state.py:253: RuntimeError
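
Here the expected strides [150528, 50176, 224, 1] are contiguous NCHW for a (1, 3, 224, 224) image, while the observed [3, 1, 672, 3] is effectively a channels-last (NHWC) layout of the same shape (note 672 = 224 * 3). A hedged sketch of the layout fix, applied to the input tensor before compile/verify:

    # Hedged sketch: force standard row-major NCHW layout on the input image.
    img_tensor = img_tensor.contiguous(memory_format=torch.contiguous_format)
    assert img_tensor.stride() == (150528, 50176, 224, 1)  # contiguous (1, 3, 224, 224)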

Check failure on line 93 in forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py

test_efficientnet.test_efficientnet_timm[efficientnet_b0]

RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/program/program.cpp:895: tt::exception
info:
Statically allocated circular buffers on core range [(x=0,y=0) - (x=6,y=6)] grow to 1942368 B which is beyond max L1 size of 1499136 B
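
For scale, the two figures in the message quantify the overshoot directly:

    1942368 B requested - 1499136 B available = 443232 B (~433 KiB), roughly 30% over the per-core L1 budget.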
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a83c723b0>
variant = 'efficientnet_b0'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_efficientnet_timm(record_forge_property, variant):
        if variant != "efficientnet_b0":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="efficientnet",
            variant=variant,
            source=Source.TIMM,
            task=Task.IMAGE_CLASSIFICATION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # Load model
        framework_model = download_model(timm.create_model, variant, pretrained=True)
        framework_model.eval()
    
        # Load and pre-process image
        try:
            url, filename = (
                "https://github.com/pytorch/hub/raw/master/images/dog.jpg",
                "dog.jpg",
            )
            urllib.request.urlretrieve(url, filename)
            img = Image.open(filename).convert("RGB")
            config = resolve_data_config({}, model=framework_model)
            transform = create_transform(**config)
            img_tensor = transform(img).unsqueeze(0)
        except Exception:
            logger.warning(
                "Failed to download the image file, replacing input with random tensor. Please check if the URL is up to date"
            )
            img_tensor = torch.rand(1, 3, 224, 224)
    
        inputs = [img_tensor]
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/efficientnet/test_efficientnet.py:93: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f6c3aceb3d0>
inputs = (tensor([[[[-1.92953, -1.92953, -1.92953,  ..., -2.04940, -1.92953, -1.92953],
          [-2.01516, -1.87816, -1.89528... ..., -0.91556, -1.61272, -1.40357],
          [-1.26414, -1.43843, -1.19442,  ..., -0.67155, -1.14213, -1.28157]]]]),)
inputs_and_parameters = [tensor([[[[-1.92953, -1.92953, -1.92953,  ..., -2.04940, -1.92953, -1.92953],
          [-2.01516, -1.87816, -1.89528...2.44088, 3.86180, 3.68086, 2.09527, 3.81788, 4.29909, 4.17497, 2.64127, 3.81770, 3.27010]]]], requires_grad=True), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: TT_THROW @ /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal/tt_metal/impl/program/program.cpp:895: tt::exception
E       info:
E       Statically allocated circular buffers on core range [(x=0,y=0) - (x=6,y=6)] grow to 1942368 B which is beyond max L1 size of 1499136 B
E       backtrace:
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/libtt_metal.so(+0x16145b) [0x7f6b9fade45b]
E        --- tt::tt_metal::detail::Program_::validate_circular_buffer_region(tt::tt_metal::v0::IDevice const*)
E        --- tt::tt_metal::v0::EnqueueProgram(tt::tt_metal::CommandQueue&, tt::tt_metal::v0::Program&, bool)
E        --- void ttnn::device_operation::detail::launch_on_worker_thread<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, tt::tt_metal::v0::IDevice*>(unsigned char, long, tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, tt::tt_metal::v0::IDevice*&)
E        --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::launch_on_single_device<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x2595de9) [0x7f6ba22c0de9]
E        --- tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_return_value_t ttnn::device_operation::detail::invoke<tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > > >(unsigned char, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::operation_attributes_t const&, tt::tt_metal::operation::OldInfraDeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >::tensor_args_t const&)
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259020f) [0x7f6ba22bb20f]
E        --- std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > tt::tt_metal::operation::run_without_autoformat<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(tt::tt_metal::operation::DeviceOperation<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >&&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&, unsigned char)
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x4406c5) [0x7f6ba016b6c5]
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259c8a2) [0x7f6ba22c78a2]
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(+0x259d08c) [0x7f6ba22c808c]
E        --- void tt::tt_metal::operation::launch_op_func<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > >(std::function<std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > (std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> > const&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > > const&, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > > const&)> const&, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >, std::vector<tt::tt_metal::Tensor, std::allocator<tt::tt_metal::Tensor> >&, std::vector<std::optional<tt::tt_metal::Tensor const>, std::allocator<std::optional<tt::tt_metal::Tensor const> > >, std::vector<std::optional<tt::tt_metal::Tensor>, std::allocator<std::optional<tt::tt_metal::Tensor> > >, bool)
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(_ZN4ttnn10operations4conv6conv2d18optimized_conv_newERKN2tt8tt_metal6TensorES7_St8optionalIS6_ERKNS0_14sliding_window19SlidingWindowConfigEjjbbRKNS2_34OptimizedConvParallelizationConfigERKNS2_24OptimizedConvBlockConfigERKNS4_12MemoryConfigENS4_8DataTypeESt5arrayIjLm4EEbRKSt7variantIJNS_28GrayskullComputeKernelConfigENS_27WormholeComputeKernelConfigEEEbbbbb+0x6bd) [0x7f6ba010411d]
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/third_party/tt-metal/src/tt-metal-build/lib/_ttnn.so(_ZN4ttnn10operations4conv6conv2d6conv2dIN2tt8tt_metal2v07IDeviceEEESt5tupleIJNS5_6TensorEjjS9_St8optionalIS9_EEERKS9_SE_PT_jjjjjSt5arrayIjLm2EESI_SI_SI_jSA_ISD_ERKSA_IKNS2_12Conv2dConfigEERKSA_IKSt7variantIJNS_28GrayskullComputeKernelConfigENS_27WormholeComputeKernelConfigEEEERKSA_IKNS5_12MemoryConfigEE+0xf42) [0x7f6ba00f3172]
E        --- /__w/tt-forge-fe/tt-forge-fe/third_party/tt-mlir/build/lib/SharedLib/libTTMLIR.so(+0x778193) [0x7f6ba3c7f193]
E        --- tt::runtime::ttnn::operations::conv::run(tt::target::ttnn::Conv2dOp const*, tt::runtime::ttnn::ProgramContext&)
E        --- tt::runtime::ttnn::runProgram(tt::tt_metal::distributed::MeshDevice&, tt::runtime::Binary, unsigned int, std::vector<tt::tt_metal::Tensor*, std::allocator<tt::tt_metal::Tensor*> > const&)
E        --- tt::runtime::ttnn::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::runtime::submit(tt::runtime::Device, tt::runtime::Binary, unsigned int, std::vector<tt::runtime::Tensor, std::allocator<tt::runtime::Tensor> > const&)
E        --- tt::run_binary(tt::runtime::Binary&, int, std::vector<at::Tensor, std::allocator<at::Tensor> > const&)
E        --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x3174f0) [0x7f6bb44004f0]
E        --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0x31743e) [0x7f6bb440043e]
E        --- /__w/tt-forge-fe/tt-forge-fe/forge/forge/_C.so(+0xe0cf5) [0x7f6bb41c9cf5]
E        --- [73 CPython interpreter frames elided: repeated _PyEval_EvalFrameDefault / _PyFunction_Vectorcall / PyObject_Call entries in /opt/ttforge-toolchain/venv/bin/python]

forge/forge/compiled_graph_state.py:253: RuntimeError
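Note on reading this trace: the `_ZN4ttnn...` entries at the top are mangled C++ symbols from `_ttnn.so`; the first one demangles to the `ttnn::operations::conv::conv2d::optimized_conv_new(...)` overload, so the crash happens inside the TTNN conv2d kernel at run time (under `run_binary`), after MLIR compilation already succeeded. A minimal sketch for demangling such frames locally, assuming `c++filt` from GNU binutils is on PATH (the sample symbol is a short stand-in, since the real conv2d frame is several hundred characters long):

    # Sketch: demangle mangled C++ frames like the ones in the trace above.
    import subprocess

    def demangle(symbol: str) -> str:
        # c++filt reads a mangled name and prints the demangled form.
        out = subprocess.run(["c++filt", symbol], capture_output=True, text=True)
        return out.stdout.strip()

    print(demangle("_ZNSt6vectorIiSaIiEE9push_backERKi"))
    # -> std::vector<int, std::allocator<int> >::push_back(int const&)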

Check failure on line 172 in forge/test/models/pytorch/vision/hrnet/test_hrnet.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_hrnet.test_hrnet_timm_pytorch[hrnet_w18_small]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a65d5b5b0>
variant = 'hrnet_w18_small'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_hrnet_timm_pytorch(record_forge_property, variant):
        if variant != "hrnet_w18_small":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="hrnet", variant=variant, source=Source.TIMM, task=Task.POSE_ESTIMATION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_hrnet_imgcls_timm_pytorch(
            variant,
        )
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/hrnet/test_hrnet.py:172: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_hrnet_hrnet_w18_small_pose_estimation_timm], graph_name='pt_hrnet_hrnet_w18_small_po...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6a70eba270>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

forge/forge/compile.py:976: RuntimeError
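For local triage, the failing parametrization can be run in isolation; a minimal sketch (node id taken from the annotation above, assuming a tt-forge-fe checkout with the nightly test dependencies installed):

    # Re-run only the failing hrnet parametrization; -m nightly matches the
    # test's own marker, and -vv keeps the full lowering error visible.
    import pytest

    raise SystemExit(pytest.main([
        "forge/test/models/pytorch/vision/hrnet/test_hrnet.py"
        "::test_hrnet_timm_pytorch[hrnet_w18_small]",
        "-m", "nightly",
        "-vv",
    ]))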

Check failure on line 45 in forge/test/models/pytorch/vision/inception/test_inception_v4.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_inception_v4.test_inception_v4_osmr_pytorch

RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a65d5a170>

    @pytest.mark.nightly
    def test_inception_v4_osmr_pytorch(record_forge_property):
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH, model="inception", variant="v4", source=Source.OSMR, task=Task.IMAGE_CLASSIFICATION
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs = generate_model_inceptionV4_imgcls_osmr_pytorch("inceptionv4")
    
        # Forge compile framework model
        compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)
    
        # Model Verification
>       verify(inputs, framework_model, compiled_model)

forge/test/models/pytorch/vision/inception/test_inception_v4.py:45: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/verify/verify.py:302: in verify
    co_out = compiled_model(*inputs)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

self = <forge.compiled_graph_state.CompiledModel object at 0x7f6a83796b30>
inputs = (tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940... ..., -0.63669, -0.95041, -0.63669],
          [-0.60183, -0.79355, -1.05499,  ..., -0.49725, -1.10728, -0.91556]]]]),)
inputs_and_parameters = [tensor([[[[-2.08365, -2.03228, -1.99803,  ..., -2.04940, -2.03228, -1.99803],
          [-2.04940, -2.03228, -2.04940...0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.],
          [0., 0., 0.,  ..., 0., 0., 0.]]]]), ...]

    def __call__(self, *inputs: AnyTensor) -> List[torch.Tensor]:
        """
        Run inference on the compiled model.
    
        Parameters
        ----------
        inputs: [Tensor, ...]
            Input tensors
    
        Returns
        -------
        List[Tensor]
            Output tensors
        """
        self.inputs = [*to_pt_tensors(inputs)]
    
        inputs_and_parameters = [
            *self.inputs,
            *self.fwd_compiled_graph_state.get_ordered_constant_tensors(),
            *self.fwd_compiled_graph_state.get_ordered_parameter_tensors(),
        ]
    
        assert all(
            [isinstance(t, torch.Tensor) for t in inputs_and_parameters]
        ), "All inputs should be torch tensors by now."
    
        if self.training() and isinstance(self.framework_module, PyTorchModule):
            for name, param in self.framework_module.module.named_parameters():
                if param.requires_grad:
                    our_tensor = self.fwd_compiled_graph_state.get_parameter_tensor(name)
    
                    # NOTE: for parameters that require gradients, we want to share the same tensor with the PyTorch
                    # module. This is because we want to be able to optimize the parameters both on the device
                    # (through our runtime) and via the torch optimizers. So this ensures that whichever side updates
                    # the parameter value, the other side can see the change.
                    #
                    # This could change in the future, but for now ensure that our premise is correct.
                    assert param is our_tensor
    
        logger.info(
            f"Running model {self.framework_module.get_name()} {self.fwd_compiled_graph_state.graph.get_name()} on device..."
        )
>       all_outputs = run_binary(self.compiled_binary, int(ProgramId.FORWARD), inputs_and_parameters)
E       RuntimeError: Tensor 47 - stride mismatch: expected [1225, 1], got [0, 0]

forge/forge/compiled_graph_state.py:253: RuntimeError
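The reported stride `[0, 0]` is what PyTorch uses for broadcast views: a tensor produced by `expand()` repeats one element without copying, so every stride is zero, while the runtime expected the dense layout `[1225, 1]`. A minimal sketch of the mismatch, and of `.contiguous()` as the usual normalization (pure PyTorch, not tied to this model):

    import torch

    # expand() creates a broadcast view: no data is copied, so both
    # strides are 0 -- exactly the "got [0, 0]" reported above.
    view = torch.zeros(1, 1).expand(4, 1225)
    print(view.stride())               # (0, 0)

    # .contiguous() materializes the values into a dense buffer whose
    # strides match the expected [1225, 1] layout.
    print(view.contiguous().stride())  # (1225, 1)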

Check failure on line 53 in forge/test/models/pytorch/vision/vit/test_vit.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_vit.test_vit_classify_224_hf_pytorch[google/vit-base-patch16-224]

RuntimeError: Failed to run MLIR compiler pass pipeline.
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6c493163b0>
variant = 'google/vit-base-patch16-224'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants, ids=variants)
    def test_vit_classify_224_hf_pytorch(record_forge_property, variant):
        if variant != "google/vit-base-patch16-224":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="vit",
            variant=variant,
            task=Task.IMAGE_CLASSIFICATION,
            source=Source.HUGGINGFACE,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_vit_imgcls_hf_pytorch(variant)
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/vit/test_vit.py:53: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_vit_google_vit_base_patch16_224_img_cls_hf], graph_name='pt_vit_google_vit_base_patc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6a6634ae30>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Failed to run MLIR compiler pass pipeline.

forge/forge/compile.py:976: RuntimeError
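Until the pass-pipeline failure is fixed, the variant can stay in the sweep without failing the run; a triage sketch (an assumption about workflow, not the repo's actual policy), using pytest's parametrize marks:

    import pytest

    # Mark the known-bad variant xfail; strict=True flips the test red
    # again as soon as the MLIR pipeline starts succeeding.
    variants = [
        pytest.param(
            "google/vit-base-patch16-224",
            marks=pytest.mark.xfail(
                raises=RuntimeError,
                reason="Failed to run MLIR compiler pass pipeline",
                strict=True,
            ),
        ),
    ]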

Check failure on line 52 in forge/test/models/pytorch/vision/yolo/test_yolo_v5.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v5.test_yolov5_320x320[yolov5s]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a6e600040>
size = 's'

    @pytest.mark.nightly
    @pytest.mark.parametrize("size", size, ids=["yolov5" + s for s in size])
    def test_yolov5_320x320(record_forge_property, size):
        if size != "s":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v5",
            variant="yolov5" + size,
            task="imgcls",
            source="torchhub",
            suffix="320x320",
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        framework_model, inputs, _ = generate_model_yoloV5I320_imgcls_torchhub_pytorch(
            "ultralytics/yolov5",
            size=size,
        )
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v5.py:52: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v5_yolov5s_imgcls_torchhub_320x320], graph_name='pt_yolo_v5_yolov5s_imgcls_torc...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f6a73cb1770>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

forge/forge/compile.py:976: RuntimeError
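This is the same lowering failure seen in the hrnet test above and the yolo_v6 test below; when sweeping many models, compilation can be wrapped so one unsupported op doesn't abort the batch. A sketch using only `forge.compile`'s signature as shown in the tests above (the wrapper itself is hypothetical):

    import forge

    def try_compile(framework_model, inputs, module_name):
        """Compile one model; log and return None on lowering failures."""
        try:
            return forge.compile(
                framework_model, sample_inputs=inputs, module_name=module_name
            )
        except RuntimeError as err:
            print(f"{module_name}: compile failed: {err}")
            return None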

Check failure on line 68 in forge/test/models/pytorch/vision/yolo/test_yolo_v6.py

See this annotation in the file changed.

@github-actions github-actions / TT-Forge-FE Tests

test_yolo_v6.test_yolo_v6_pytorch[yolov6n]

RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph
Raw output
record_forge_property = <function record_property.<locals>.append_property at 0x7f6a6e739cf0>
variant = 'yolov6n'

    @pytest.mark.nightly
    @pytest.mark.parametrize("variant", variants)
    def test_yolo_v6_pytorch(record_forge_property, variant):
        if variant != "yolov6n":
            pytest.skip("Skipping due to the current CI/CD pipeline limitations")
    
        # Build Module Name
        module_name = build_module_name(
            framework=Framework.PYTORCH,
            model="yolo_v6",
            variant=variant,
            source=Source.TORCH_HUB,
            task=Task.OBJECT_DETECTION,
        )
    
        # Record Forge Property
        record_forge_property("model_name", module_name)
    
        # STEP 2 :prepare model
        url = f"https://github.com/meituan/YOLOv6/releases/download/0.3.0/{variant}.pt"
        weights = f"{variant}.pt"
    
        try:
            response = requests.get(url)
            with open(weights, "wb") as file:
                file.write(response.content)
            print(f"Downloaded {url} to {weights}")
        except Exception as e:
            print(f"Error downloading {url}: {e}")
    
        model = YOLOV6(weights)
        framework_model = model.model
        framework_model.eval()
    
        # STEP 3 : prepare input
        url = "http://images.cocodataset.org/val2017/000000397133.jpg"
        stride = 32
        input_size = 640
        img_size = check_img_size(input_size, s=stride)
        img, img_src = process_image(url, img_size, stride, half=False)
        input_batch = img.unsqueeze(0)
    
        inputs = [input_batch]
    
        # Forge compile framework model
>       compiled_model = forge.compile(framework_model, sample_inputs=inputs, module_name=module_name)

forge/test/models/pytorch/vision/yolo/test_yolo_v6.py:68: 
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
forge/forge/compile.py:253: in compile_main
    return forge_compile_from_context(compile_context)
forge/forge/compile.py:295: in forge_compile_from_context
    next_stage = stage_to_func[current_stage](context)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 

context = CompileContext(modules=[Module pt_yolo_v6_yolov6n_obj_det_torchhub], graph_name='pt_yolo_v6_yolov6n_obj_det_torchhub',...cles_offset=0, forge_module=<forge._C.ForgeGraphModule object at 0x7f692b1d88b0>, compiled_binary=None, attach_to=None)

    def run_mlir_compiler(context: CompileContext) -> CompileDepth:
        assert context.forge_module is not None
    
>       context.compiled_binary = forge._C.run_mlir_compiler(context.forge_module)
E       RuntimeError: Found Unsupported operations while lowering from TTForge to TTIR in forward graph

forge/forge/compile.py:976: RuntimeError
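Separately from the lowering error, the weight download in this test swallows failures: the `except` only prints, and `YOLOV6(weights)` then loads a file that may be missing or truncated. A hardened sketch of that step (same `requests` calls, plus a timeout and an HTTP status check):

    import requests

    def fetch_weights(url: str, dest: str, timeout: float = 60.0) -> str:
        """Download weights or raise, instead of silently continuing."""
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()      # surface 4xx/5xx immediately
        with open(dest, "wb") as file:
            file.write(response.content)
        return dest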