diff --git a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
index bc27fcc3fc..e2b34929a7 100644
--- a/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
+++ b/include/ttmlir/Dialect/TTIR/IR/TTIROps.td
@@ -803,6 +803,40 @@ def TTIR_ConcatOp : TTIR_DPSOp<"concat"> {
   let hasVerifier = 1;
 }
 
+def TTIR_RepeatOp : TTIR_DPSOp<"repeat"> {
+  let summary = "Repeat operation.";
+  let description = [{
+    The `repeat` operation creates a new tensor by replicating the input tensor's elements
+    along specified dimensions. The number of repetitions for each dimension is defined by
+    the `repeat_dimensions` attribute, whose size must match the rank of the input tensor.
+
+    Parameters:
+    - `input`: The input tensor.
+    - `repeat_dimensions`: Specifies the number of times to repeat this tensor along each dimension.
+
+    ### Example IR Usage:
+    ```mlir
+    // Input tensor of shape (2, 3)
+    %input = ... : tensor<2x3xf32>
+    %output = tensor.empty() : tensor<4x6xf32>
+    // Repeat each dimension twice, producing a (4, 6) result
+    %repeated = "ttir.repeat"(%input, %output) {repeat_dimensions = array<i32: 2, 2>} : (tensor<2x3xf32>, tensor<4x6xf32>) -> tensor<4x6xf32>
+    ```
+  }];
+
+  let arguments = (ins AnyRankedTensor:$input,
+                       AnyRankedTensor:$output,
+                       DenseI32ArrayAttr:$repeat_dimensions);
+
+  let results = (outs AnyRankedTensor:$result);
+
+  let extraClassDeclaration = [{
+    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
+  }];
+
+  let hasVerifier = 1;
+}
+
 def TTIR_RepeatInterleaveOp : TTIR_DPSOp<"repeat_interleave"> {
   let summary = "Repeat interleave op.";
   let description = [{
diff --git a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
index 58d0a35bd5..dbc41ecb22 100644
--- a/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
+++ b/include/ttmlir/Dialect/TTNN/IR/TTNNOps.td
@@ -774,11 +774,15 @@ def TTNN_ReshapeOp : TTNN_Op<"reshape"> {
 def TTNN_RepeatOp : TTNN_Op<"repeat"> {
   let summary = "Repeat op.";
   let description = [{
-    Repeat the input tensor according to number of times specified in repeat_dimensions.
+    Returns a new tensor filled with repetitions of the input tensor, as specified by `repeat_dims`.
+
+    Parameters:
+    - `input_tensor` (ttnn.Tensor): the input tensor.
+    - `repeat_dims`: the number of repetitions along each dimension.
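+
+    ### Example IR Usage:
+    ```mlir
+    // Illustrative usage, adapted from this change's tests; the shapes and
+    // repeat_dims values are only for demonstration.
+    %repeated = "ttnn.repeat"(%input) {repeat_dims = [1 : i32, 16 : i32, 1 : i32]} : (tensor<1x1x32xf32>) -> tensor<1x16x32xf32>
+    ```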
   }];
 
   let arguments = (ins AnyRankedTensor:$input,
-                       I32ArrayAttr:$shape);
+                       I32ArrayAttr:$repeat_dims);
 
   let results = (outs AnyRankedTensor:$result);
diff --git a/include/ttmlir/Target/TTNN/program.fbs b/include/ttmlir/Target/TTNN/program.fbs
index 10c4ba319a..6f98a0e0ae 100644
--- a/include/ttmlir/Target/TTNN/program.fbs
+++ b/include/ttmlir/Target/TTNN/program.fbs
@@ -229,7 +229,7 @@ table ReshapeOp {
 table RepeatOp {
   in: tt.target.TensorRef;
   out: tt.target.TensorRef;
-  shape: [uint32];
+  repeat_dims: [uint32];
 }
 
 table SliceOp {
diff --git a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
index 77fef33315..4903730214 100644
--- a/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
+++ b/lib/Conversion/TTIRToTTNN/TTIRToTTNN.cpp
@@ -660,6 +660,23 @@ class BroadcastOpConversionPattern
   }
 };
 
+class RepeatOpConversionPattern : public OpConversionPattern<ttir::RepeatOp> {
+  using OpConversionPattern<ttir::RepeatOp>::OpConversionPattern;
+
+public:
+  LogicalResult
+  matchAndRewrite(ttir::RepeatOp op, ttir::RepeatOp::Adaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
+    auto repeatDimensionsAttr = adaptor.getRepeatDimensionsAttr();
+
+    rewriter.replaceOpWithNewOp<ttnn::RepeatOp>(
+        op, this->getTypeConverter()->convertType(op.getType()),
+        adaptor.getInput(), rewriter.getI32ArrayAttr(repeatDimensionsAttr));
+
+    return success();
+  }
+};
+
 class UnsqueezeOpConversionPattern
     : public OpConversionPattern<ttir::UnsqueezeOp> {
 public:
@@ -1335,6 +1352,7 @@ void populateTTIRToTTNNPatterns(MLIRContext *ctx, RewritePatternSet &patterns,
            BroadcastOpConversionPattern,
            EmbeddingOpConversionPattern,
            EmbeddingBackwardOpConversionPattern,
+           RepeatOpConversionPattern,
            RepeatInterleaveOpConversionPattern,
            SoftmaxOpConversionPattern,
            TransposeOpConversionPattern,
diff --git a/lib/Dialect/TTIR/IR/TTIROps.cpp b/lib/Dialect/TTIR/IR/TTIROps.cpp
index a835f1251e..966503571a 100644
--- a/lib/Dialect/TTIR/IR/TTIROps.cpp
+++ b/lib/Dialect/TTIR/IR/TTIROps.cpp
@@ -1614,6 +1614,50 @@ ::mlir::LogicalResult mlir::tt::ttir::AllocOp::verify() {
   return success();
 }
 
+//===----------------------------------------------------------------------===//
+// RepeatOp
+//===----------------------------------------------------------------------===//
+
+// RepeatOp verification
+::mlir::LogicalResult mlir::tt::ttir::RepeatOp::verify() {
+  ::mlir::RankedTensorType inputType = getInput().getType();
+  ::mlir::RankedTensorType outputType = getOutput().getType();
+  llvm::ArrayRef<int32_t> repeatDimensions = getRepeatDimensions();
+
+  // The input tensor and the repeat dimensions argument must have the same
+  // rank.
+  if (inputType.getRank() != static_cast<int64_t>(repeatDimensions.size())) {
+    return emitOpError() << "Input tensor rank " << inputType.getRank()
+                         << " doesn't match the number of repeat dimensions "
+                         << repeatDimensions.size() << ".";
+  }
+
+  // The input and output tensors must have the same rank.
+  if (inputType.getRank() != outputType.getRank()) {
+    return emitOpError() << "Input tensor rank " << inputType.getRank()
+                         << " doesn't match the output tensor rank "
+                         << outputType.getRank() << ".";
+  }
+
+  // Verify the output shape against the input shape and the repeat
+  // dimensions argument.
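+  // Worked example (illustrative): a (2,3) input with repeat_dimensions
+  // (2,2) must have output shape (2*2, 3*2) = (4,6); anything else is
+  // rejected below.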
(" + << ttmlir::utils::join(outputShape, ",") + << ") using repeat value " << repeatDimensions[i] + << "."; + } + } + + return success(); +} + //===----------------------------------------------------------------------===// // RepeatInterleaveOp //===----------------------------------------------------------------------===// diff --git a/lib/Dialect/TTNN/IR/TTNNOps.cpp b/lib/Dialect/TTNN/IR/TTNNOps.cpp index 2a3d45216d..274e2f7fa9 100644 --- a/lib/Dialect/TTNN/IR/TTNNOps.cpp +++ b/lib/Dialect/TTNN/IR/TTNNOps.cpp @@ -412,19 +412,35 @@ ::mlir::LogicalResult mlir::tt::ttnn::ConcatOp::verify() { ::mlir::LogicalResult mlir::tt::ttnn::RepeatOp::verify() { ::mlir::RankedTensorType inputType = getInput().getType(); ::mlir::RankedTensorType outputType = getResult().getType(); + auto repeatDims = getRepeatDims(); - auto shape = getShape(); + // Verify that the input tensor and repeate_dims argument have same rank + if (inputType.getRank() != static_cast(repeatDims.size())) { + return emitOpError() << "Input tensor rank " << inputType.getRank() + << " doesn't match the number of repeat dimensions " + << repeatDims.size() << "."; + } + + // Verify that the input and output tensor have same rank + if (inputType.getRank() != outputType.getRank()) { + return emitOpError() << "Input tensor rank " << inputType.getRank() + << " doesn't match the output tensor rank " + << outputType.getRank() << "."; + } + + // Verify expected output shape auto inputShape = inputType.getShape(); auto outputShape = outputType.getShape(); - for (size_t i = 0; i < shape.size(); i++) { - uint32_t dimValue = mlir::cast(shape[i]).getInt(); + for (size_t i = 0; i < getRepeatDims().size(); i++) { + uint32_t dimValue = mlir::cast(repeatDims[i]).getInt(); if (inputShape[i] * dimValue != outputShape[i]) { return emitOpError() << "Input tensor shape (" - << ttmlir::utils::join(inputShape, ",") << ") index " - << i << " does not repeat to output (" + << ttmlir::utils::join(inputShape, ",") + << ") at index " << i + << " does not repeat to output (" << ttmlir::utils::join(outputShape, ",") - << ") using repeat value " << dimValue; + << ") using repeat value " << dimValue << "."; } } diff --git a/lib/Target/TTNN/TTNNToFlatbuffer.cpp b/lib/Target/TTNN/TTNNToFlatbuffer.cpp index 1d52bc8c47..8e258795cf 100644 --- a/lib/Target/TTNN/TTNNToFlatbuffer.cpp +++ b/lib/Target/TTNN/TTNNToFlatbuffer.cpp @@ -874,8 +874,8 @@ ::flatbuffers::Offset<::tt::target::ttnn::RepeatOp> createRepeatOp(FlatbufferObjectCache &cache, RepeatOp op) { auto in = cache.at<::tt::target::TensorRef>(getOperandThroughDPSOps(op.getInput())); - auto shape = - arrayAttrToFlatbuffer(cache, op.getShape()); + auto shape = arrayAttrToFlatbuffer( + cache, op.getRepeatDims()); auto out = cache.getOrCreate(op.getResult(), tensorValueToFlatbuffer, kHostAllocatedAddress, kHostAllocatedSize); diff --git a/runtime/lib/ttnn/operations/data_movement/repeat.cpp b/runtime/lib/ttnn/operations/data_movement/repeat.cpp index 17a2a98802..39bae64df6 100644 --- a/runtime/lib/ttnn/operations/data_movement/repeat.cpp +++ b/runtime/lib/ttnn/operations/data_movement/repeat.cpp @@ -11,10 +11,10 @@ void run(const ::tt::target::ttnn::RepeatOp *op, ProgramContext &context) { ProgramTensorPool &tensorPool = context.getTensorPool(); const ::ttnn::Tensor &in = tensorPool.at(op->in()->global_id()); DEBUG_ASSERT(in.is_allocated()); - const auto *fbShape = op->shape(); - const std::vector dims(fbShape->begin(), fbShape->end()); - ::ttnn::Shape shape(dims); - ::ttnn::Tensor out = ::ttnn::repeat(in, shape); + const 
+  const auto *fbShape = op->repeat_dims();
+  const std::vector<uint32_t> repeatDims(fbShape->begin(), fbShape->end());
+  ::ttnn::Shape repeatDimsShape(repeatDims);
+  ::ttnn::Tensor out = ::ttnn::repeat(in, repeatDimsShape);
   tensorPool.insert_or_assign(op->out()->global_id(), out);
 }
 } // namespace tt::runtime::ttnn::operations::data_movement
diff --git a/test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_rank.mlir b/test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_rank.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_rank.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_rank.mlir
diff --git a/test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_repeat_dimensions.mlir b/test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_repeat_dimensions.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_repeat_dimensions.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_repeat_dimensions.mlir
diff --git a/test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_shape.mlir b/test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_shape.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/broadcast/ttir_broadcast_negative_shape.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/broadcast/ttir_broadcast_negative_shape.mlir
diff --git a/test/ttmlir/Dialect/TTIR/permute/permute_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/data_movement/permute/permute_tests_negative.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/permute/permute_tests_negative.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/permute/permute_tests_negative.mlir
diff --git a/test/ttmlir/Dialect/TTIR/data_movement/repeat/repeat_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/data_movement/repeat/repeat_tests_negative.mlir
new file mode 100644
index 0000000000..be6bc97783
--- /dev/null
+++ b/test/ttmlir/Dialect/TTIR/data_movement/repeat/repeat_tests_negative.mlir
@@ -0,0 +1,36 @@
+// RUN: not ttmlir-opt --split-input-file %s 2>&1 | FileCheck %s
+// Negative tests for the repeat operation.
+
+// Verify that verification fails if the input tensor and the repeat_dimensions attribute don't have the same rank.
+module {
+  func.func @repeat_not_valid_repeat_dimension_attribute(%arg0: tensor<32x32xf32>) -> tensor<32x64xf32> {
+    // CHECK: 'ttir.repeat' op Input tensor rank 2 doesn't match the number of repeat dimensions 1.
+    %0 = tensor.empty() : tensor<32x64xf32>
+    %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array<i32: 2>} : (tensor<32x32xf32>, tensor<32x64xf32>) -> tensor<32x64xf32>
+    return %1 : tensor<32x64xf32>
+  }
+}
+
+// -----
+
+// Verify that verification fails if the input and output tensors don't have the same rank.
+module {
+  func.func @repeat_not_valid_input_output(%arg0: tensor<32x32xf32>) -> tensor<1x32x64xf32> {
+    // CHECK: 'ttir.repeat' op Input tensor rank 2 doesn't match the output tensor rank 3.
+    %0 = tensor.empty() : tensor<1x32x64xf32>
+    %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array<i32: 1, 2>} : (tensor<32x32xf32>, tensor<1x32x64xf32>) -> tensor<1x32x64xf32>
+    return %1 : tensor<1x32x64xf32>
+  }
+}
+
+// -----
+
+// Verify that verification fails if the output tensor shape doesn't match the expected repeated shape.
+module {
+  func.func @repeat_not_valid_input_output(%arg0: tensor<32x32xf32>) -> tensor<32x128xf32> {
+    // CHECK: 'ttir.repeat' op Input tensor shape (32,32) at index 1 does not repeat to output (32,128) using repeat value 2.
+    %0 = tensor.empty() : tensor<32x128xf32>
+    %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array<i32: 1, 2>} : (tensor<32x32xf32>, tensor<32x128xf32>) -> tensor<32x128xf32>
+    return %1 : tensor<32x128xf32>
+  }
+}
diff --git a/test/ttmlir/Dialect/TTIR/repeat_interleave/repeat_interleave_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/data_movement/repeat_interleave/repeat_interleave_tests_negative.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/repeat_interleave/repeat_interleave_tests_negative.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/repeat_interleave/repeat_interleave_tests_negative.mlir
diff --git a/test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir b/test/ttmlir/Dialect/TTIR/data_movement/slice/slice_tests_negative.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/slice/slice_tests_negative.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/slice/slice_tests_negative.mlir
diff --git a/test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir b/test/ttmlir/Dialect/TTIR/data_movement/slice/slice_tests_positive.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTIR/slice/slice_tests_positive.mlir
rename to test/ttmlir/Dialect/TTIR/data_movement/slice/slice_tests_positive.mlir
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir b/test/ttmlir/Dialect/TTNN/data_movement/concat/concat_dim_oob.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/concat/concat_dim_oob.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/concat/concat_dim_oob.mlir
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir b/test/ttmlir/Dialect/TTNN/data_movement/concat/concat_multiple_tensors.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/concat/concat_multiple_tensors.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/concat/concat_multiple_tensors.mlir
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir b/test/ttmlir/Dialect/TTNN/data_movement/concat/concat_negative_dim.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/concat/concat_negative_dim.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/concat/concat_negative_dim.mlir
diff --git a/test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir b/test/ttmlir/Dialect/TTNN/data_movement/concat/concat_negative_dim_oob.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/concat/concat_negative_dim_oob.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/concat/concat_negative_dim_oob.mlir
diff --git a/test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir b/test/ttmlir/Dialect/TTNN/data_movement/concat/simple_concat.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/concat/simple_concat.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/concat/simple_concat.mlir
diff --git a/test/ttmlir/Dialect/TTNN/permute/permute_tests_negative.mlir b/test/ttmlir/Dialect/TTNN/data_movement/permute/permute_tests_negative.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/permute/permute_tests_negative.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/permute/permute_tests_negative.mlir
diff --git a/test/ttmlir/Dialect/TTNN/permute/permute_tests_positive.mlir b/test/ttmlir/Dialect/TTNN/data_movement/permute/permute_tests_positive.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/permute/permute_tests_positive.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/permute/permute_tests_positive.mlir
diff --git a/test/ttmlir/Dialect/TTNN/permute/simple_permute.mlir b/test/ttmlir/Dialect/TTNN/data_movement/permute/simple_permute.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/permute/simple_permute.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/permute/simple_permute.mlir
diff --git a/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_negative.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_negative.mlir
new file mode 100644
index 0000000000..f82d5f2bc1
--- /dev/null
+++ b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_negative.mlir
@@ -0,0 +1,33 @@
+// RUN: not ttmlir-opt --split-input-file %s 2>&1 | FileCheck %s
+// Negative tests for the repeat operation.
+
+// Verify that verification fails if the input tensor and the repeat_dims attribute don't have the same rank.
+module {
+  func.func @repeat_not_valid_repeat_dimension_attribute(%arg0: tensor<32x32xf32>) -> tensor<32x64xf32> {
+    // CHECK: 'ttnn.repeat' op Input tensor rank 2 doesn't match the number of repeat dimensions 1.
+    %0 = "ttnn.repeat"(%arg0) {repeat_dims = [2 : i32]} : (tensor<32x32xf32>) -> tensor<32x64xf32>
+    return %0 : tensor<32x64xf32>
+  }
+}
+
+// -----
+
+// Verify that verification fails if the input and output tensors don't have the same rank.
+module {
+  func.func @repeat_not_valid_input_output(%arg0: tensor<32x32xf32>) -> tensor<1x32x64xf32> {
+    // CHECK: 'ttnn.repeat' op Input tensor rank 2 doesn't match the output tensor rank 3.
+    %0 = "ttnn.repeat"(%arg0) {repeat_dims = [1 : i32, 2 : i32]} : (tensor<32x32xf32>) -> tensor<1x32x64xf32>
+    return %0 : tensor<1x32x64xf32>
+  }
+}
+
+// -----
+
+// Verify that verification fails if the output tensor shape doesn't match the expected repeated shape.
+module {
+  func.func @repeat_not_valid_input_output(%arg0: tensor<32x32xf32>) -> tensor<32x128xf32> {
+    // CHECK: 'ttnn.repeat' op Input tensor shape (32,32) at index 1 does not repeat to output (32,128) using repeat value 2.
+ %0 = "ttnn.repeat"(%arg0) {repeat_dims = [1 : i32, 2 : i32]} : (tensor<32x32xf32>) -> tensor<32x128xf32> + return %0 : tensor<32x128xf32> + } +} diff --git a/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_positive.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_positive.mlir new file mode 100644 index 0000000000..938a3b832d --- /dev/null +++ b/test/ttmlir/Dialect/TTNN/data_movement/repeat/repeat_tests_positive.mlir @@ -0,0 +1,19 @@ +// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline %s | FileCheck %s + +module { + func.func @repeat_on_one_dim(%arg0: tensor<1x32x32xf32>) -> tensor<32x32x32xf32> { + // CHECK: "ttnn.repeat" + // CHECK-SAME: repeat_dims = [32 : i32, 1 : i32, 1 : i32] + %0 = tensor.empty() : tensor<32x32x32xf32> + %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array} : (tensor<1x32x32xf32>, tensor<32x32x32xf32>) -> tensor<32x32x32xf32> + return %1 : tensor<32x32x32xf32> + } + + func.func @repeat_on_all_dims(%arg0: tensor<1x1x32xf32>) -> tensor<32x32x64xf32> { + // CHECK: "ttnn.repeat" + // CHECK-SAME: repeat_dims = [32 : i32, 32 : i32, 2 : i32] + %0 = tensor.empty() : tensor<32x32x64xf32> + %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array} : (tensor<1x1x32xf32>, tensor<32x32x64xf32>) -> tensor<32x32x64xf32> + return %1 : tensor<32x32x64xf32> + } +} diff --git a/test/ttmlir/Dialect/TTNN/simple_repeat.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat/simple_repeat.mlir similarity index 93% rename from test/ttmlir/Dialect/TTNN/simple_repeat.mlir rename to test/ttmlir/Dialect/TTNN/data_movement/repeat/simple_repeat.mlir index 00fddfb786..8cce80f915 100644 --- a/test/ttmlir/Dialect/TTNN/simple_repeat.mlir +++ b/test/ttmlir/Dialect/TTNN/data_movement/repeat/simple_repeat.mlir @@ -2,7 +2,7 @@ module { func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> { // CHECK: %{{[0-9]+}} = "ttnn.repeat" - // CHECK-SAME: shape = [1 : i32, 16 : i32, 1 : i32] + // CHECK-SAME: repeat_dims = [1 : i32, 16 : i32, 1 : i32] %0 = tensor.empty() : tensor<1x16x32xf32> %1 = "ttir.broadcast"(%arg1, %0) <{broadcast_dimensions = array}> : (tensor<1x1x32xf32>, tensor<1x16x32xf32>) -> tensor<1x16x32xf32> %2 = tensor.empty() : tensor<1x16x32xf32> @@ -15,7 +15,7 @@ module { func.func public @main(%arg0: tensor<1xf32>, %arg1: tensor<512x512xf32>) -> (tensor<512x512xf32>) { // CHECK: %{{[0-9]+}} = "ttnn.reshape" // CHECK: %{{[0-9]+}} = "ttnn.repeat" - // CHECK-SAME: shape = [512 : i32, 512 : i32] + // CHECK-SAME: repeat_dims = [512 : i32, 512 : i32] %0 = tensor.empty() : tensor<1x1xf32> %1 = "ttir.reshape"(%arg0, %0) <{shape = [1 : i32, 1 : i32]}> : (tensor<1xf32>, tensor<1x1xf32>) -> tensor<1x1xf32> %2 = tensor.empty() : tensor<512x512xf32> @@ -30,7 +30,7 @@ module { func.func @main(%arg0: tensor<1x23x40x1xf32>, %arg1: tensor<128xf32>) -> tensor<1x23x40x128xf32> { // CHECK: %{{[0-9]+}} = "ttnn.reshape" // CHECK: %{{[0-9]+}} = "ttnn.repeat" - // CHECK-SAME: shape = [1 : i32, 23 : i32, 40 : i32, 1 : i32] + // CHECK-SAME: repeat_dims = [1 : i32, 23 : i32, 40 : i32, 1 : i32] %0 = tensor.empty() : tensor<1x23x40x128xf32> %1 = "ttir.broadcast"(%arg0, %0) <{broadcast_dimensions = array}> : (tensor<1x23x40x1xf32>, tensor<1x23x40x128xf32>) -> tensor<1x23x40x128xf32> %2 = tensor.empty() : tensor<1x1x1x128xf32> @@ -46,7 +46,7 @@ module { module { func.func @main(%arg0: tensor<6x2xf32>) -> tensor<2400x2xf32> { // CHECK: %{{[0-9]+}} = "ttnn.repeat" - // CHECK-SAME: shape = [400 : i32, 1 : i32, 1 : i32, 1 : i32] + // CHECK-SAME: repeat_dims = 
+    // CHECK-SAME: repeat_dims = [400 : i32, 1 : i32, 1 : i32, 1 : i32]
     %0 = tensor.empty() : tensor<1x6x2xf32>
     %1 = "ttir.reshape"(%arg0, %0) <{shape = [1 : i32, 6 : i32, 2 : i32]}> : (tensor<6x2xf32>, tensor<1x6x2xf32>) -> tensor<1x6x2xf32>
     %2 = tensor.empty() : tensor<1x6x1x2xf32>
diff --git a/test/ttmlir/Dialect/TTNN/repeat_interleave/repeat_interleave_tests_negative.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/repeat_interleave_tests_negative.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/repeat_interleave/repeat_interleave_tests_negative.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/repeat_interleave_tests_negative.mlir
diff --git a/test/ttmlir/Dialect/TTNN/repeat_interleave/repeat_interleave_tests_positive.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/repeat_interleave_tests_positive.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/repeat_interleave/repeat_interleave_tests_positive.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/repeat_interleave_tests_positive.mlir
diff --git a/test/ttmlir/Dialect/TTNN/repeat_interleave/simple_repeat_interleave.mlir b/test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/simple_repeat_interleave.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/repeat_interleave/simple_repeat_interleave.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/repeat_interleave/simple_repeat_interleave.mlir
diff --git a/test/ttmlir/Dialect/TTNN/reshape/reshape_folding_test.mlir b/test/ttmlir/Dialect/TTNN/data_movement/reshape/reshape_folding_test.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/reshape/reshape_folding_test.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/reshape/reshape_folding_test.mlir
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir b/test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/transpose/simple_transpose.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose.mlir
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir b/test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_8x16_reverse_dims.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x16_reverse_dims.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_8x16_reverse_dims.mlir
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir b/test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_8x8.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/transpose/simple_transpose_8x8.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_8x8.mlir
diff --git a/test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir b/test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_negative_dims.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/transpose/simple_transpose_negative_dims.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/transpose/simple_transpose_negative_dims.mlir
diff --git a/test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir b/test/ttmlir/Dialect/TTNN/data_movement/transpose/transpose_twice.mlir
similarity index 100%
rename from test/ttmlir/Dialect/TTNN/transpose/transpose_twice.mlir
rename to test/ttmlir/Dialect/TTNN/data_movement/transpose/transpose_twice.mlir
diff --git a/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir b/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir
index 37aceb3963..488c769168 100644
--- a/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir
+++ b/test/ttmlir/Dialect/TTNN/implicit_broadcast.mlir
@@ -17,7 +17,7 @@ func.func @main(%arg0: tensor<128xf32>, %arg1: tensor<128xf32>) -> tensor<784x128xf32> {
   // CHECK-NOT: "ttnn.repeat"
   // CHECK: %{{[0-9]+}} = "ttnn.reshape"
   // CHECK: %{{[0-9]+}} = "ttnn.repeat"
-  // CHECK-SAME: shape = [784 : i32, 1 : i32]
+  // CHECK-SAME: repeat_dims = [784 : i32, 1 : i32]
   // CHECK: %{{[0-9]+}} = "ttnn.add"
   %0 = tensor.empty() : tensor<1x128xf32>
   %1 = "ttir.reshape"(%arg0, %0) <{shape = [1 : i32, 128 : i32]}> : (tensor<128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
@@ -35,7 +35,7 @@ func.func @main(%arg0: tensor<128xf32>, %arg1: tensor<128xf32>) -> tensor<784x128xf32> {
 module {
   func.func @main(%arg0: tensor<1x16x32xf32>, %arg1: tensor<1x1x32xf32>) -> tensor<1x16x32xf32> {
     // CHECK: [[VAL0:%[0-9]+]] = "ttnn.repeat"
-    // CHECK-SAME: shape = [1 : i32, 16 : i32, 1 : i32]
+    // CHECK-SAME: repeat_dims = [1 : i32, 16 : i32, 1 : i32]
     // CHECK: %{{[0-9]+}} = "ttnn.multiply"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}})
     // CHECK: %{{[0-9]+}} = "ttnn.bitwise_and"([[VAL0]], %{{[0-9]+}}, %{{[0-9]+}})
     %0 = tensor.empty() : tensor<1x16x32xf32>
diff --git a/test/ttmlir/Silicon/TTNN/data_movement/repeat/simple_repeat.mlir b/test/ttmlir/Silicon/TTNN/data_movement/repeat/simple_repeat.mlir
new file mode 100644
index 0000000000..fb39563595
--- /dev/null
+++ b/test/ttmlir/Silicon/TTNN/data_movement/repeat/simple_repeat.mlir
@@ -0,0 +1,12 @@
+// RUN: ttmlir-opt --ttir-to-ttnn-backend-pipeline="system-desc-path=%system_desc_path%" %s > %t.mlir
+// RUN: FileCheck %s --input-file=%t.mlir
+// RUN: ttmlir-translate --ttnn-to-flatbuffer %t.mlir > %t.ttnn
+module {
+  func.func @repeat(%arg0: tensor<1x32x32xf32>) -> tensor<32x32x32xf32> {
+    // CHECK: "ttnn.repeat"
+    // CHECK-SAME: repeat_dims = [32 : i32, 1 : i32, 1 : i32]
+    %0 = tensor.empty() : tensor<32x32x32xf32>
+    %1 = "ttir.repeat"(%arg0, %0) {repeat_dimensions = array<i32: 32, 1, 1>} : (tensor<1x32x32xf32>, tensor<32x32x32xf32>) -> tensor<32x32x32xf32>
+    return %1 : tensor<32x32x32xf32>
+  }
+}
diff --git a/test/ttmlir/Silicon/TTNN/repeat_interleave/simple_repeat_interleave.mlir b/test/ttmlir/Silicon/TTNN/data_movement/repeat_interleave/simple_repeat_interleave.mlir
similarity index 100%
rename from test/ttmlir/Silicon/TTNN/repeat_interleave/simple_repeat_interleave.mlir
rename to test/ttmlir/Silicon/TTNN/data_movement/repeat_interleave/simple_repeat_interleave.mlir
diff --git a/test/ttmlir/Silicon/TTNN/implicit_broadcast.mlir b/test/ttmlir/Silicon/TTNN/implicit_broadcast.mlir
index bd6d811c96..0e8e0e4074 100644
--- a/test/ttmlir/Silicon/TTNN/implicit_broadcast.mlir
+++ b/test/ttmlir/Silicon/TTNN/implicit_broadcast.mlir
@@ -19,7 +19,7 @@ func.func @main(%arg0: tensor<128xf32>, %arg1: tensor<128xf32>) -> tensor<784x128xf32> {
   // CHECK-NOT: "ttnn.repeat"
   // CHECK: %{{[0-9]+}} = "ttnn.reshape"
   // CHECK: %{{[0-9]+}} = "ttnn.repeat"
-  // CHECK-SAME: shape = [784 : i32, 1 : i32]
+  // CHECK-SAME: repeat_dims = [784 : i32, 1 : i32]
   // CHECK: %{{[0-9]+}} = "ttnn.add"
   %0 = tensor.empty() : tensor<1x128xf32>
   %1 = "ttir.reshape"(%arg0, %0) <{shape = [1 : i32, 128 : i32]}> : (tensor<128xf32>, tensor<1x128xf32>) -> tensor<1x128xf32>
diff --git a/test/ttmlir/Silicon/TTNN/simple_repeat.mlir b/test/ttmlir/Silicon/TTNN/simple_repeat.mlir
index 2000530a01..8f0a1d3a4e 100644
--- a/test/ttmlir/Silicon/TTNN/simple_repeat.mlir
+++ b/test/ttmlir/Silicon/TTNN/simple_repeat.mlir
@@ -30,7 +30,7 @@ module {
   func.func @main(%arg0: tensor<1x23x40x1xf32>, %arg1: tensor<128xf32>) -> tensor<1x23x40x128xf32> {
     // CHECK: %{{[0-9]+}} = "ttnn.reshape"
     // CHECK: %{{[0-9]+}} = "ttnn.repeat"
-    // CHECK-SAME: shape = [1 : i32, 23 : i32, 40 : i32, 1 : i32]
+    // CHECK-SAME: repeat_dims = [1 : i32, 23 : i32, 40 : i32, 1 : i32]
     %0 = tensor.empty() : tensor<1x23x40x128xf32>
     %1 = "ttir.broadcast"(%arg0, %0) <{broadcast_dimensions = array<i64: 1, 1, 1, 128>}> : (tensor<1x23x40x1xf32>, tensor<1x23x40x128xf32>) -> tensor<1x23x40x128xf32>
     %2 = tensor.empty() : tensor<1x1x1x128xf32>
@@ -46,7 +46,7 @@ module {
 module {
   func.func @main(%arg0: tensor<6x2xf32>) -> tensor<2400x2xf32> {
     // CHECK: %{{[0-9]+}} = "ttnn.repeat"
-    // CHECK-SAME: shape = [400 : i32, 1 : i32, 1 : i32, 1 : i32]
+    // CHECK-SAME: repeat_dims = [400 : i32, 1 : i32, 1 : i32, 1 : i32]
     %0 = tensor.empty() : tensor<1x6x2xf32>
     %1 = "ttir.reshape"(%arg0, %0) <{shape = [1 : i32, 6 : i32, 2 : i32]}> : (tensor<6x2xf32>, tensor<1x6x2xf32>) -> tensor<1x6x2xf32>
     %2 = tensor.empty() : tensor<1x6x1x2xf32>