From 882b110e7bf0dd11023cc7d6cdc127d95b8f467e Mon Sep 17 00:00:00 2001
From: Ashok Kumar Kannan
Date: Fri, 15 Nov 2024 13:07:43 +0000
Subject: [PATCH] Add support for mod(remainder) op

---
 forge/csrc/passes/lower_to_mlir.cpp                |  1 +
 forge/forge/op/__init__.py                         |  1 +
 forge/forge/op/eltwise_binary.py                   |  4 ++++
 forge/forge/op/eval/forge/__init__.py              |  1 +
 forge/forge/op/eval/forge/eltwise_binary.py        |  1 +
 forge/forge/tvm_to_python.py                       |  2 ++
 forge/test/mlir/test_ops.py                        | 22 +++++++++++++++++++
 .../high_prio/nlp/pytorch/test_opt.py              |  2 +-
 8 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/forge/csrc/passes/lower_to_mlir.cpp b/forge/csrc/passes/lower_to_mlir.cpp
index a1e3cd45f..55fb6be50 100644
--- a/forge/csrc/passes/lower_to_mlir.cpp
+++ b/forge/csrc/passes/lower_to_mlir.cpp
@@ -577,6 +577,7 @@ class MLIRGenerator
         lowering_handler_map["subtract"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["transpose"] = &MLIRGenerator::emit_mlir_ttforge_op;
         lowering_handler_map["unsqueeze"] = &MLIRGenerator::emit_mlir_ttforge_op;
+        lowering_handler_map["remainder"] = &MLIRGenerator::emit_mlir_ttforge_op;
     }
 };
 } // namespace
diff --git a/forge/forge/op/__init__.py b/forge/forge/op/__init__.py
index 800487fdf..2afe64792 100644
--- a/forge/forge/op/__init__.py
+++ b/forge/forge/op/__init__.py
@@ -23,6 +23,7 @@
     Equal,
     NotEqual,
     LogicalAnd,
+    Remainder,
 )
 from .eltwise_unary import (
     Exp,
diff --git a/forge/forge/op/eltwise_binary.py b/forge/forge/op/eltwise_binary.py
index 3171bdcbf..663cd8c86 100644
--- a/forge/forge/op/eltwise_binary.py
+++ b/forge/forge/op/eltwise_binary.py
@@ -441,3 +441,7 @@ def LogicalAnd(name: str, operandA: Tensor, operandB: Union[Tensor, Parameter]) -> Tensor:
     """
 
     return op("logical_and", name, operandA, operandA).get_tensor()
+
+
+def Remainder(name: str, operandA: Tensor, operandB: Union[Tensor, Parameter]) -> Tensor:
+    return _Eltwise(name, operandA, operandB, "remainder")
diff --git a/forge/forge/op/eval/forge/__init__.py b/forge/forge/op/eval/forge/__init__.py
index 198793757..bd0549669 100644
--- a/forge/forge/op/eval/forge/__init__.py
+++ b/forge/forge/op/eval/forge/__init__.py
@@ -28,6 +28,7 @@
     "add": "eltwise_binary",
     "cast": Cast,
     "divide": "eltwise_binary",
+    "remainder": "eltwise_binary",
     "subtract": "eltwise_binary",
     "multiply": "eltwise_binary",
     "maximum": "eltwise_binary",
diff --git a/forge/forge/op/eval/forge/eltwise_binary.py b/forge/forge/op/eval/forge/eltwise_binary.py
index 19c02cb1f..48ef42d40 100644
--- a/forge/forge/op/eval/forge/eltwise_binary.py
+++ b/forge/forge/op/eval/forge/eltwise_binary.py
@@ -50,6 +50,7 @@ def eval(type, attr, ops):
         "equal": lambda i: torch.eq(t_ops[0], t_ops[1]).to(t_ops[0].dtype),
         "not_equal": lambda i: torch.ne(t_ops[0], t_ops[1]).to(t_ops[0].dtype),
         "logical_and": lambda i: torch.logical_and(t_ops[0], t_ops[1]).to(t_ops[0].dtype),
+        "remainder": lambda i: torch.remainder(t_ops[0], t_ops[1]),
     }
 
     assert type in f, f"{type} not defined in eval map for eltwise binary ops."
diff --git a/forge/forge/tvm_to_python.py b/forge/forge/tvm_to_python.py
index f687e1bb6..218eb72f5 100644
--- a/forge/forge/tvm_to_python.py
+++ b/forge/forge/tvm_to_python.py
@@ -1615,6 +1615,7 @@ def populate_requantize_args(graph, nid, compiler_cfg):
 tvm_to_forge_op_map = {
     "abs": "abs",
     "add": "add",
+    "floor_mod": "remainder",
     "argmax": "argmax",
     "broadcast_to": "broadcast",
     "cast": "cast",
@@ -1700,6 +1701,7 @@ def populate_requantize_args(graph, nid, compiler_cfg):
 forge_op_to_function_name = {
     "abs": "forge.op.Abs",
     "add": "forge.op.Add",
+    "remainder": "forge.op.Remainder",
     "adv_index": "forge.op.AdvIndex",
     "argmax": "forge.op.Argmax",
     "avg_pool1d": "forge.op.AvgPool1d",
diff --git a/forge/test/mlir/test_ops.py b/forge/test/mlir/test_ops.py
index 22af7323b..8639277b7 100644
--- a/forge/test/mlir/test_ops.py
+++ b/forge/test/mlir/test_ops.py
@@ -1724,3 +1724,25 @@ def forward(self, *tensors):
 
     co_out = [co.to("cpu") for co in co_out]
     fw_out = [fw_out] if isinstance(fw_out, torch.Tensor) else fw_out
+
+
+@pytest.mark.push
+def test_remainder():
+    class Remainder(nn.Module):
+        def __init__(self):
+            super().__init__()
+
+        def forward(self, a, b):
+            return a % b
+
+    inputs = [torch.rand(2, 32, 32), torch.rand(2, 32, 32)]
+
+    framework_model = Remainder()
+    fw_out = framework_model(*inputs)
+
+    compiled_model = forge.compile(framework_model, sample_inputs=inputs)
+    co_out = compiled_model(*inputs)
+
+    co_out = [co.to("cpu") for co in co_out]
+    fw_out = [fw_out] if isinstance(fw_out, torch.Tensor) else fw_out
+    assert all([compare_with_golden_pcc(golden=fo, calculated=co, pcc=0.99) for fo, co in zip(fw_out, co_out)])
diff --git a/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py b/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py
index a0bcbbad4..b5de61dad 100644
--- a/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py
+++ b/forge/test/model_demos/high_prio/nlp/pytorch/test_opt.py
@@ -85,7 +85,7 @@ def test_opt_qa(variant, test_device):
 def test_opt_sequence_classification(variant, test_device):
     # Set Forge configuration parameters
     compiler_cfg = forge.config._get_global_compiler_config()
-    compiler_cfg.compile_depth = forge.CompileDepth.INIT_COMPILE
+    compiler_cfg.compile_depth = forge.CompileDepth.SPLIT_GRAPH
 
     # Load tokenizer and model from HuggingFace
     # Variants: "facebook/opt-125m", "facebook/opt-350m", "facebook/opt-1.3b"