Change create_tt_tensor_from_py_data to use from_vector #16999

Closed
wants to merge 30 commits into main from jjiang/16837-tensor_creation_and_conversion

Changes shown are from 23 of the 30 commits.

Commits (30)
35ba041
Change create_tt_tensor_from_py_data to use from_vector
jjiangTT Jan 22, 2025
7ac5be0
removed switch case default, use bfloat16 for bfp_b types, shift bfp_…
jjiangTT Jan 23, 2025
cec8ec8
add encode_tensor_data to create_owned_tensor_from_row_major_data
jjiangTT Jan 23, 2025
4352603
removed graph printng and parameterized tests splitting out bfp_b
jjiangTT Jan 23, 2025
3360602
Remove extraneous comments and double declarations, change data_ptr t…
jjiangTT Jan 23, 2025
daad2e1
clean up create_owned_tensor_from_row_major_data
jjiangTT Jan 23, 2025
b332630
Merge branch 'main' into jjiang/16837-tensor_creation_and_conversion
jjiangTT Jan 23, 2025
8c6802b
additional minor formatting fixes
jjiangTT Jan 23, 2025
7b5d3b0
Merge branch 'jjiang/16837-tensor_creation_and_conversion' of https:/…
jjiangTT Jan 23, 2025
e9f5bf4
Added shape conversion testing, borrow testing for bfp_b types, and s…
jjiangTT Jan 24, 2025
fecf95c
Change create_tt_tensor_from_py_data to use from_vector
jjiangTT Jan 22, 2025
c0873cb
removed switch case default, use bfloat16 for bfp_b types, shift bfp_…
jjiangTT Jan 23, 2025
c6c387c
add encode_tensor_data to create_owned_tensor_from_row_major_data
jjiangTT Jan 23, 2025
8454b66
removed graph printng and parameterized tests splitting out bfp_b
jjiangTT Jan 23, 2025
c837112
Remove extraneous comments and double declarations, change data_ptr t…
jjiangTT Jan 23, 2025
42d8476
clean up create_owned_tensor_from_row_major_data
jjiangTT Jan 23, 2025
74a4412
additional minor formatting fixes
jjiangTT Jan 23, 2025
ba5de4c
Added shape conversion testing, borrow testing for bfp_b types, and s…
jjiangTT Jan 24, 2025
68cf514
Merge branch 'jjiang/16837-tensor_creation_and_conversion' of https:/…
jjiangTT Jan 24, 2025
ea0c02d
Fix layout error on validstorage test
jjiangTT Jan 24, 2025
b1b5f10
move validstorage test into test_convert_python_tensor.py, add shard …
jjiangTT Jan 27, 2025
482b0d2
fix shard bounding
jjiangTT Jan 27, 2025
cfa67a5
Merge branch 'main' into jjiang/16837-tensor_creation_and_conversion
jjiangTT Jan 28, 2025
f785091
remove unnecessary asserts and deprecated create_tensor method
jjiangTT Jan 28, 2025
8d4597b
fix test_convert_python_tensor imports, move convert_python to unit_t…
jjiangTT Jan 28, 2025
17ecda7
Remove todo for type checking logic
jjiangTT Jan 28, 2025
ff70a26
Merge branch 'main' into jjiang/16837-tensor_creation_and_conversion
jjiangTT Jan 28, 2025
cc8f9e0
fix graph tracing errors in test_convert_python, fix extraneous tt_fa…
jjiangTT Jan 28, 2025
c52e47f
fix pytorch type and comparison errors
jjiangTT Jan 29, 2025
d8c6c40
remove extraneous includes
jjiangTT Jan 29, 2025
207 changes: 205 additions & 2 deletions tests/ttnn/unit_tests/gtests/tensor/test_vector_conversion.cpp
@@ -7,8 +7,14 @@
#include <algorithm>
#include <cstdint>

#include "assert.hpp"
#include "buffer_constants.hpp"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "shape2d.hpp"
#include "tests/ttnn/unit_tests/gtests/ttnn_test_fixtures.hpp"
#include <tt-metalium/bfloat16.hpp>
#include <vector>
#include "ttnn/tensor/tensor.hpp"
#include "ttnn/tensor/tensor_utils.hpp"
#include "ttnn/tensor/types.hpp"
@@ -40,6 +46,26 @@ const std::vector<ttnn::SimpleShape>& get_shapes_for_test() {
return *shapes;
}

TensorSpec get_tensor_spec_with_memory(
const ttnn::SimpleShape& shape,
DataType dtype,
Layout layout = Layout::ROW_MAJOR,
TensorMemoryLayout mem_layout = TensorMemoryLayout::SINGLE_BANK) {
return TensorSpec(
shape,
TensorLayout(
dtype,
layout,
MemoryConfig{
.memory_layout = mem_layout,
.buffer_type = BufferType::DRAM,
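            // Fixed shard spec for the sharded-layout tests: a 5x2 core grid
            // (cores {0,0} through {4,1}) with 32x64 logical shards, row-major orientation.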
.shard_spec = ShardSpec{
ttnn::CoreRangeSet{ttnn::CoreRange{ttnn::CoreCoord{0, 0}, ttnn::CoreCoord{4, 1}}},
{32, 64},
ShardOrientation::ROW_MAJOR,
ShardMode::LOGICAL}}));
}

TensorSpec get_tensor_spec(const ttnn::SimpleShape& shape, DataType dtype, Layout layout = Layout::ROW_MAJOR) {
return TensorSpec(shape, TensorLayout(dtype, layout, MemoryConfig{}));
}
@@ -64,11 +90,27 @@ class VectorConversionTest : public ::testing::Test {};
using TestTypes = ::testing::Types<float, bfloat16, uint8_t, uint16_t, uint32_t, int32_t>;
TYPED_TEST_SUITE(VectorConversionTest, TestTypes);

TYPED_TEST(VectorConversionTest, TensorShape) {
for (const auto& shape : get_shapes_for_test()) {
auto input = arange<TypeParam>(0, static_cast<int64_t>(shape.volume()), 1);
Tensor output = Tensor::from_vector(input, get_tensor_spec(shape, convert_to_data_type<TypeParam>()));

EXPECT_THAT(output.get_tensor_spec().shape(), Eq(shape)) << "for shape: " << shape;
}
}

TYPED_TEST(VectorConversionTest, Roundtrip) {
for (const auto& shape : get_shapes_for_test()) {
auto input = arange<TypeParam>(0, static_cast<int64_t>(shape.volume()), 1);
auto tensor = Tensor::from_vector(input, get_tensor_spec(shape, convert_to_data_type<TypeParam>()));

TensorSpec tensor_spec = tensor.get_tensor_spec();

EXPECT_THAT(tensor_spec.shape(), Eq(shape)) << "for shape: " << shape;
EXPECT_THAT(tensor_spec.data_type(), Eq(convert_to_data_type<TypeParam>()));

auto output = tensor.template to_vector<TypeParam>();

EXPECT_THAT(output, Pointwise(Eq(), input)) << "for shape: " << shape;
}
}
@@ -81,6 +123,23 @@ TYPED_TEST(VectorConversionTest, InvalidSize) {
EXPECT_ANY_THROW(Tensor::from_vector(input, get_tensor_spec(shape, convert_to_data_type<TypeParam>())));
}

TYPED_TEST(VectorConversionTest, OddshapeRoundtripTilizedLayout) {
ttnn::SimpleShape shape{1, 40, 3, 121};

auto input = arange<TypeParam>(0, shape.volume(), 1);

auto tensor = Tensor::from_vector(input, get_tensor_spec(shape, convert_to_data_type<TypeParam>(), Layout::TILE));

ASSERT_NE(tensor.tensor_spec().padded_shape(), tensor.tensor_spec().logical_shape());

EXPECT_THAT(tensor.tensor_spec().logical_shape(), ShapeIs(1, 40, 3, 121));
EXPECT_THAT(tensor.get_padded_shape(), ShapeIs(1, 40, 32, 128));

auto output = tensor.template to_vector<TypeParam>();

EXPECT_THAT(output, Pointwise(Eq(), input));
}

TYPED_TEST(VectorConversionTest, RoundtripTilizedLayout) {
ttnn::SimpleShape shape{128, 128};

@@ -186,5 +245,149 @@ TEST_F(DeviceVectorConversionTest, RoundtripWithMemoryConfig) {
EXPECT_THAT(output.to_vector<float>(), Pointwise(Eq(), input));
}

// TODO: simplify into a two-layer typed + parameterized test (TYPED_TEST_P).
class ShardVectorConversionTest : public ::testing::TestWithParam<TensorMemoryLayout> {};
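// Each TEST_P below runs once per TensorMemoryLayout supplied by INSTANTIATE_TEST_SUITE_P at the bottom of this file.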

TEST_P(ShardVectorConversionTest, UInt32RoundtripRowMajShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input = arange<uint32_t>(0, shape.volume(), 1);

auto tensor =
Tensor::from_vector(input, get_tensor_spec_with_memory(shape, DataType::UINT32, Layout::ROW_MAJOR, GetParam()));

auto output = tensor.template to_vector<uint32_t>();

EXPECT_THAT(output, Pointwise(Eq(), input));
}

TEST_P(ShardVectorConversionTest, UInt32RoundtripTilizedShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input = arange<uint32_t>(0, shape.volume(), 1);

auto tensor =
Tensor::from_vector(input, get_tensor_spec_with_memory(shape, DataType::UINT32, Layout::TILE, GetParam()));

ASSERT_NE(tensor.tensor_spec().logical_2d_shape(), tensor.tensor_spec().physical_shape());

EXPECT_THAT(tensor.get_logical_shape(), ShapeIs(121, 128));
EXPECT_THAT(tensor.get_padded_shape(), ShapeIs(128, 128));

auto output = tensor.template to_vector<uint32_t>();

EXPECT_THAT(output, Pointwise(Eq(), input));
}

TEST_P(ShardVectorConversionTest, FloatRoundtripRowMajShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input = arange<float>(0, shape.volume(), 1);

auto tensor = Tensor::from_vector(
input, get_tensor_spec_with_memory(shape, DataType::FLOAT32, Layout::ROW_MAJOR, GetParam()));

auto output = tensor.template to_vector<float>();

EXPECT_THAT(output, Pointwise(Eq(), input));
}

TEST_P(ShardVectorConversionTest, FloatRoundtripTilizedShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input = arange<float>(0, shape.volume(), 1);

auto tensor =
Tensor::from_vector(input, get_tensor_spec_with_memory(shape, DataType::FLOAT32, Layout::TILE, GetParam()));

ASSERT_NE(tensor.tensor_spec().logical_2d_shape(), tensor.tensor_spec().physical_shape());

EXPECT_THAT(tensor.get_logical_shape(), ShapeIs(121, 128));
EXPECT_THAT(tensor.get_padded_shape(), ShapeIs(128, 128));

auto output = tensor.template to_vector<float>();

EXPECT_THAT(output, Pointwise(Eq(), input));
}

TEST_P(ShardVectorConversionTest, Bfloat16RoundtripRowMajShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input_bf16 = arange<bfloat16>(0, static_cast<int64_t>(shape.volume()), 1);
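    // Generating values as bfloat16 and widening them to float below keeps every input
    // exactly representable in BFLOAT16, so the row-major roundtrip can be compared exactly.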

std::vector<float> input_ft;
input_ft.reserve(input_bf16.size());
std::transform(
input_bf16.begin(), input_bf16.end(), std::back_inserter(input_ft), [](bfloat16 bf) { return bf.to_float(); });

auto tensor_bf = Tensor::from_vector(
input_ft, get_tensor_spec_with_memory(shape, DataType::BFLOAT16, Layout::ROW_MAJOR, GetParam()));

auto output_ft = tensor_bf.to_vector<float>();

EXPECT_THAT(output_ft, Pointwise(Eq(), input_ft));
}

TEST_P(ShardVectorConversionTest, Bfloat16RoundtripTilizedShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input_bf16 = arange<bfloat16>(0, static_cast<int64_t>(shape.volume()), 1);

std::vector<float> input_ft;
input_ft.reserve(input_bf16.size());
std::transform(
input_bf16.begin(), input_bf16.end(), std::back_inserter(input_ft), [](bfloat16 bf) { return bf.to_float(); });

auto tensor_bf =
Tensor::from_vector(input_ft, get_tensor_spec_with_memory(shape, DataType::BFLOAT16, Layout::TILE, GetParam()));

ASSERT_NE(tensor_bf.tensor_spec().logical_2d_shape(), tensor_bf.tensor_spec().physical_shape());

EXPECT_THAT(tensor_bf.get_logical_shape(), ShapeIs(121, 128));
EXPECT_THAT(tensor_bf.get_padded_shape(), ShapeIs(128, 128));

auto output_ft = tensor_bf.to_vector<float>();

EXPECT_THAT(output_ft, Pointwise(FloatNear(4.0f), input_ft));
}

TEST_P(ShardVectorConversionTest, BlockfloatRoundtripRowMajShardMapping) {
ttnn::SimpleShape shape{121, 128};
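    // The fourth arange argument is assumed to wrap values modulo 32 (its signature is not
    // shown in this diff), keeping magnitudes small enough that the lossy BFLOAT8_B roundtrip
    // stays within the FloatNear tolerance below. BFLOAT8_B is a tile-only block format, which
    // is presumably why this "RowMaj"-named test also uses Layout::TILE.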

auto input = arange<float>(0, shape.volume(), 1, 32);

auto tensor =
Tensor::from_vector(input, get_tensor_spec_with_memory(shape, DataType::BFLOAT8_B, Layout::TILE, GetParam()));

EXPECT_THAT(tensor.to_vector<float>(), Pointwise(FloatNear(4.0f), input));
}

TEST_P(ShardVectorConversionTest, BlockfloatRoundtripTilizedShardMapping) {
ttnn::SimpleShape shape{121, 128};

auto input = arange<float>(0, shape.volume(), 1, 32);

auto tensor =
Tensor::from_vector(input, get_tensor_spec_with_memory(shape, DataType::BFLOAT8_B, Layout::TILE, GetParam()));

ASSERT_NE(tensor.tensor_spec().logical_2d_shape(), tensor.tensor_spec().physical_shape());

EXPECT_THAT(tensor.get_logical_shape(), ShapeIs(121, 128));
EXPECT_THAT(tensor.get_padded_shape(), ShapeIs(128, 128));

EXPECT_THAT(tensor.to_vector<float>(), Pointwise(FloatNear(4.0f), input));
}

INSTANTIATE_TEST_SUITE_P(
ShardVectorConversionTests,
ShardVectorConversionTest,
::testing::Values(
TensorMemoryLayout::INTERLEAVED,
TensorMemoryLayout::SINGLE_BANK,
TensorMemoryLayout::HEIGHT_SHARDED,
TensorMemoryLayout::WIDTH_SHARDED,
TensorMemoryLayout::BLOCK_SHARDED));

} // namespace

} // namespace ttnn
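A hedged note on running the new suites: --gtest_filter is standard googletest, but the test binary path and name below are assumptions about the local build tree, not something this diff confirms.

./build/test/ttnn/unit_tests_ttnn --gtest_filter='ShardVectorConversionTests/*'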
65 changes: 65 additions & 0 deletions tests/ttnn/unit_tests/test_convert_python_tensor.py
@@ -0,0 +1,65 @@
# SPDX-FileCopyrightText: © 2025 Tenstorrent Inc.
# SPDX-License-Identifier: Apache-2.0

import pathlib
import pytest

import torch

import ttnn

@pytest.mark.parametrize("size", [64])
@pytest.mark.parametrize("mode", [ttnn.graph.RunMode.NO_DISPATCH, ttnn.graph.RunMode.NORMAL])
@pytest.mark.parametrize("dtype", [torch.int32, torch.float, torch.bfloat16, torch.uint8])
def test_convert_python_tensor(device, size, mode, dtype):
torch.manual_seed(0)

ttnn.graph.begin_graph_capture(mode)
torch_input_tensor = torch.rand((size,), (dtype))
input_tensor = ttnn.from_torch(torch_input_tensor, layout=ttnn.TILE_LAYOUT, device=device)
output_tensor = ttnn.to_torch(input_tensor, torch_rank=1)
captured_graph = ttnn.graph.end_graph_capture()
calltrace = ttnn.graph.extract_calltrace(captured_graph)

assert output_tensor == input_tensor

# note: change this test case if force_disable_borrow is exposed to user
assert output_tensor.storage_type() == ttnn.StorageType.BORROWED

assert "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor" in calltrace
assert captured_graph[0]["node_type"] == "capture_start"
assert captured_graph[1]["node_type"] == "function_start"
assert captured_graph[1]["params"]["name"] == "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor"
assert captured_graph[-2]["node_type"] == "buffer_deallocate"
assert captured_graph[-1]["node_type"] == "capture_end"


@pytest.mark.parametrize("size", [64])
@pytest.mark.parametrize("mode", [ttnn.graph.RunMode.NO_DISPATCH, ttnn.graph.RunMode.NORMAL])
@pytest.mark.parametrize("dtype", [ttnn.bfloat4_b, ttnn.bfloat8_b])
def test_convert_python_tensor_bfp_b(device, size, mode, dtype):
torch.manual_seed(0)

ttnn.graph.begin_graph_capture(mode)
torch_input_tensor = torch.rand((size,), torch.float)
input_tensor = ttnn.from_torch(torch_input_tensor, layout=ttnn.TILE_LAYOUT, device=device, dtype=(dtype))
output_tensor = ttnn.to_torch(input_tensor, torch_rank=1)
captured_graph = ttnn.graph.end_graph_capture()
calltrace = ttnn.graph.extract_calltrace(captured_graph)

assert output_tensor == input_tensor
assert output_tensor.storage_type() != ttnn.StorageType.BORROWED

assert "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor" in calltrace
assert captured_graph[0]["node_type"] == "capture_start"
assert captured_graph[1]["node_type"] == "function_start"
assert captured_graph[1]["params"]["name"] == "tt::tt_metal::detail::convert_python_tensor_to_tt_tensor"
assert captured_graph[-2]["node_type"] == "buffer_deallocate"
assert captured_graph[-1]["node_type"] == "capture_end"
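A hedged usage note: assuming the repository's standard pytest fixtures supply the device argument, the new file can be run directly, e.g. pytest tests/ttnn/unit_tests/test_convert_python_tensor.py.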