Skip to content

Commit

Permalink
Push mark for indexing tests (#1182)
Browse files Browse the repository at this point in the history
### What's changed
Added the `@pytest.mark.push` marker to the indexing tests in the
`forge/test/mlir/operators/indexing` folder.
  • Loading branch information
vkovinicTT authored Feb 6, 2025
1 parent 9b18def commit 16c04d5
Show file tree
Hide file tree
Showing 8 changed files with 42 additions and 0 deletions.
6 changes: 6 additions & 0 deletions forge/test/mlir/operators/indexing/test_advanced_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@
),
],
)
@pytest.mark.push
def test_take(tensor_and_indices):
tensor, indices = tensor_and_indices

Expand Down Expand Up @@ -68,6 +69,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_index_add(input_dim_index_source):
input_tensor, dim, index, source = input_dim_index_source

Expand Down Expand Up @@ -109,6 +111,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_index_fill(input_dim_index_value):
input_tensor, dim, index, value = input_dim_index_value

Expand Down Expand Up @@ -159,6 +162,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_index_copy(input_dim_index_source):
input_tensor, dim, index, source = input_dim_index_source

Expand Down Expand Up @@ -195,6 +199,7 @@ def forward(self, x):
(torch.arange(24, dtype=torch.float32).reshape(4, 3, 2), 2, torch.tensor([0, 1])), # 3D tensor case
],
)
@pytest.mark.push
def test_index_select(input_dim_index):
input_tensor, dim, index = input_dim_index

Expand Down Expand Up @@ -243,6 +248,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_index_put(input_indices_values_accumulate):
input_tensor, indices, values, accumulate = input_indices_values_accumulate

Expand Down
2 changes: 2 additions & 0 deletions forge/test/mlir/operators/indexing/test_advanced_masking.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@
),
],
)
@pytest.mark.push
def test_masked_select(input_tensor, mask):
class MaskedSelectModule(torch.nn.Module):
def __init__(self, mask):
Expand Down Expand Up @@ -77,6 +78,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_masked_fill(input_tensor, mask, value):
class MaskedFillModule(torch.nn.Module):
def __init__(self, mask, value):
Expand Down
6 changes: 6 additions & 0 deletions forge/test/mlir/operators/indexing/test_advanced_slicing.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@
(torch.arange(1.0, 5.0).reshape(1, 4), 1, 2), # Wide matrix, select third column
],
)
@pytest.mark.push
def test_select(input_dim_index):
input_tensor, dim, index = input_dim_index

Expand Down Expand Up @@ -65,6 +66,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_split(input_tensor_sizes_dim):
input_tensor, sizes_or_parts, dim = input_tensor_sizes_dim

Expand Down Expand Up @@ -102,6 +104,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_chunk(input_tensor_chunks_dim):
input_tensor, chunks, dim = input_tensor_chunks_dim

Expand Down Expand Up @@ -182,6 +185,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_take(input_tensor_indices):
input_tensor, indices = input_tensor_indices

Expand Down Expand Up @@ -244,6 +248,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_nonzero(input_tensor_as_tuple):
input_tensor, as_tuple = input_tensor_as_tuple

Expand Down Expand Up @@ -311,6 +316,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_narrow(input_dim_start_length):
input_tensor, dim, start, length = input_dim_start_length

Expand Down
3 changes: 3 additions & 0 deletions forge/test/mlir/operators/indexing/test_basic_indexing.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,7 @@
(-1, (10,)),
],
)
@pytest.mark.push
def test_python_indexing(index_shape: Literal[0] | Literal[2] | Literal[-1]):

index, shape = index_shape
Expand Down Expand Up @@ -68,6 +69,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_python_indexing_with_lists(index_shape: list[int] | list[list[int]]):
indices, shape = index_shape

Expand Down Expand Up @@ -111,6 +113,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_python_indexing_with_tensors(index_shape):
indices, shape = index_shape

Expand Down
3 changes: 3 additions & 0 deletions forge/test/mlir/operators/indexing/test_basic_masking.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
),
],
)
@pytest.mark.push
def test_masking_greater_than(input_tensor, param):
class GreaterThanMaskingModule(torch.nn.Module):
def __init__(self, param):
Expand Down Expand Up @@ -65,6 +66,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_masking_modulus(input_tensor, mod_value):
class ModulusMaskingModule(torch.nn.Module):
def __init__(self, mod_value):
Expand Down Expand Up @@ -103,6 +105,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_masking_combined_conditions(input_tensor, greater_param, mod_param):
class CombinedMaskingModule(torch.nn.Module):
def __init__(self, greater_param, mod_param):
Expand Down
2 changes: 2 additions & 0 deletions forge/test/mlir/operators/indexing/test_basic_slicing.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
+ "loc(\"index_1.dc.unsqueeze.0\"(\"forward\":4294967295:29)): error: 'ttir.unsqueeze' op requires attribute 'dim'"
+ 'loc("SlicingModule":0:0): error: module verification failed'
)
@pytest.mark.push
def test_slicing(input_tensor_slice):
input_tensor, slicing = input_tensor_slice

Expand Down Expand Up @@ -92,6 +93,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_multidimensional_slicing(input_tensor_slicing):
input_tensor, slicing = input_tensor_slicing

Expand Down
13 changes: 13 additions & 0 deletions forge/test/mlir/operators/indexing/test_indexing_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
),
],
)
@pytest.mark.push
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::diagonal']")
def test_diagonal(input_tensor, offset, dim1, dim2):
class DiagonalModule(nn.Module):
Expand Down Expand Up @@ -101,6 +102,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::diag']")
def test_diag(input_tensor, diagonal):
class DiagModule(nn.Module):
Expand Down Expand Up @@ -159,6 +161,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::diag_embed']")
def test_diag_embed(input_tensor, offset, dim1, dim2):
class DiagEmbedModule(nn.Module):
Expand Down Expand Up @@ -209,6 +212,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
@pytest.mark.xfail(reason="AssertionError: Data mismatch on output 0 between framework and Forge codegen")
def test_triu(input_tensor, diagonal):
class TriuModule(nn.Module):
Expand Down Expand Up @@ -259,6 +263,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_tril(input_tensor, diagonal):
class TrilModule(nn.Module):
def __init__(self, diagonal):
Expand Down Expand Up @@ -314,6 +319,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::take_along_dim']")
@pytest.mark.push
def test_take_along_dim(input_tensor, indices, dim):
class TakeAlongDimModule(nn.Module):
def __init__(self, dim):
Expand Down Expand Up @@ -380,6 +386,7 @@ def forward(self, x, indices):
),
],
)
@pytest.mark.push
def test_gather(input_tensor, index, dim, sparse_grad):
class GatherModule(nn.Module):
def __init__(self, dim, sparse_grad, index):
Expand Down Expand Up @@ -430,6 +437,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="Not supported in our version of pytorch")
@pytest.mark.push
def test_unravel_index(indices, shape):
class UnravelIndexModule(nn.Module):
def __init__(self, shape):
Expand Down Expand Up @@ -477,6 +485,7 @@ def forward(self, indices):
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::put']")
@pytest.mark.push
def test_put(input_tensor, indices, values):
class PutModule(nn.Module):
def __init__(self, indices, values):
Expand Down Expand Up @@ -540,6 +549,7 @@ def forward(self, x):
),
],
)
@pytest.mark.push
def test_unique(input_tensor, sorted, return_inverse, return_counts, dim):
class UniqueModule(nn.Module):
def __init__(self, sorted, return_inverse, return_counts, dim):
Expand Down Expand Up @@ -602,6 +612,7 @@ def forward(self, x):
@pytest.mark.xfail(
reason="NotImplementedError: The following operators are not implemented: ['aten::unique_consecutive']"
)
@pytest.mark.push
def test_unique_consecutive(input_tensor, return_inverse, return_counts, dim):
class UniqueConsecutiveModule(nn.Module):
def __init__(self, return_inverse, return_counts, dim):
Expand Down Expand Up @@ -644,6 +655,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="BinaryOpType cannot be mapped to BcastOpMath")
@pytest.mark.push
def test_where(input_tensor1, input_tensor2):
class WhereModule(nn.Module):
def __init__(self, input2):
Expand Down Expand Up @@ -679,6 +691,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::argwhere']")
@pytest.mark.push
def test_argwhere(input_tensor):
class ArgwhereModule(nn.Module):
def forward(self, x):
Expand Down
7 changes: 7 additions & 0 deletions forge/test/mlir/operators/indexing/test_scatter_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::masked_scatter']")
@pytest.mark.push
def test_masked_scatter(input_tensor, mask, source):
class MaskedScatterModule(torch.nn.Module):
def __init__(self, mask, source):
Expand Down Expand Up @@ -105,6 +106,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="AssertionError: Encountered unsupported op types. Check error logs for more details.")
@pytest.mark.push
def test_scatter(input_tensor, dim, index, source):
class ScatterModule(torch.nn.Module):
def __init__(self, dim, index, source):
Expand Down Expand Up @@ -196,6 +198,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="Encountered unsupported op node type: scatter_elements, on device: tt")
@pytest.mark.push
def test_scatter_reduce(input_tensor, dim, index, source, reduce_mode):
class ScatterReduceModule(torch.nn.Module):
def __init__(self, dim, index, source, reduce_mode):
Expand Down Expand Up @@ -262,6 +265,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="Encountered unsupported op node type: scatter_elements, on device: tt")
@pytest.mark.push
def test_scatter_add(input_tensor, dim, index, source):
class ScatterAddModule(torch.nn.Module):
def __init__(self, dim, index, source):
Expand Down Expand Up @@ -316,6 +320,7 @@ def forward(self, x):
@pytest.mark.xfail(
reason="NotImplementedError: The following operators are not implemented: ['aten::diagonal_scatter']"
)
@pytest.mark.push
def test_diagonal_scatter(input_tensor, source, offset, dim1, dim2):
class DiagonalScatterModule(torch.nn.Module):
def __init__(self, source, offset, dim1, dim2):
Expand Down Expand Up @@ -359,6 +364,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::select_scatter']")
@pytest.mark.push
def test_select_scatter(input_tensor, source, dim, index):
class SelectScatterModule(torch.nn.Module):
def __init__(self, source, dim, index):
Expand Down Expand Up @@ -412,6 +418,7 @@ def forward(self, x):
],
)
@pytest.mark.xfail(reason="NotImplementedError: The following operators are not implemented: ['aten::slice_scatter']")
@pytest.mark.push
def test_slice_scatter(input_tensor, src, dim, start, end, step):
class SliceScatterModule(torch.nn.Module):
def __init__(self, source, dim, start, end, step):
Expand Down

0 comments on commit 16c04d5

Please sign in to comment.