diff --git a/ttnn/cpp/ttnn/operations/data_movement/copy/device/copy_device_operation.cpp b/ttnn/cpp/ttnn/operations/data_movement/copy/device/copy_device_operation.cpp
index 12401b334e0..dae7af67297 100644
--- a/ttnn/cpp/ttnn/operations/data_movement/copy/device/copy_device_operation.cpp
+++ b/ttnn/cpp/ttnn/operations/data_movement/copy/device/copy_device_operation.cpp
@@ -13,14 +13,14 @@ namespace ttnn::operations::data_movement {
 void CopyDeviceOperation::validate_with_output_tensors(
     const std::vector<Tensor>& input_tensors, const std::vector<std::optional<Tensor>>& output_tensors) const {
     const auto& input_tensor_a = input_tensors.at(0);
-    TT_FATAL(
-        input_tensor_a.get_dtype() == DataType::BFLOAT16 or input_tensor_a.get_dtype() == DataType::BFLOAT8_B or
-        input_tensor_a.get_dtype() == DataType::FLOAT32 or input_tensor_a.get_dtype() == DataType::BFLOAT4_B,
-        "Typecast operation is only supported on Grayskull for float/bfloat inputs");
-    TT_FATAL(
-        this->output_dtype == DataType::BFLOAT16 or this->output_dtype == DataType::BFLOAT8_B or
-        this->output_dtype == DataType::FLOAT32 or this->output_dtype == DataType::BFLOAT4_B,
-        "Typecast operation is only supported on Grayskull for float/bfloat outputs");
+    TT_FATAL(input_tensor_a.get_dtype() == DataType::BFLOAT16 or input_tensor_a.get_dtype() == DataType::BFLOAT8_B or
+             input_tensor_a.get_dtype() == DataType::FLOAT32 or input_tensor_a.get_dtype() == DataType::BFLOAT4_B,
+             "ttnn.copy only supports float/bfloat inputs but got {}",
+             input_tensor_a.get_dtype());
+    TT_FATAL(this->output_dtype == DataType::BFLOAT16 or this->output_dtype == DataType::BFLOAT8_B or
+             this->output_dtype == DataType::FLOAT32 or this->output_dtype == DataType::BFLOAT4_B,
+             "ttnn.copy only supports float/bfloat output tensors but got {}",
+             this->output_dtype);
     TT_FATAL(input_tensor_a.storage_type() == StorageType::DEVICE, "Operands to copy need to be on device!");
     TT_FATAL(input_tensor_a.buffer() != nullptr, "Operands to copy need to be allocated in buffers on device!");
     TT_FATAL(input_tensor_a.memory_config().memory_layout == TensorMemoryLayout::INTERLEAVED, "Error");