
#0: fix model and sweeps
yugaoTT committed Nov 1, 2024
1 parent 32c62b9 commit 0fa3566
Showing 3 changed files with 4 additions and 23 deletions.
```diff
@@ -101,11 +101,8 @@ def run_bert_large_matmul_test(
     )

     if bias_mem_config is not None:
-        bias_t = (
-            ttnn.Tensor(BIAS, bias_dtype)
-            .pad(bias_pad_shape, [0, 0, 0, 0], 0)
-            .to(ttnn.TILE_LAYOUT)
-            .to(device, bias_mem_config)
+        bias_t = ttnn.from_torch(
+            BIAS, dtype=bias_dtype, layout=ttnn.TILE_LAYOUT, memory_config=bias_mem_config, device=device
         )
     else:
         bias_t = None
```
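For readers tracking the API migration, here is a minimal, self-contained sketch of the new-style host-to-device transfer. The tensor shape, dtype, and memory config below are hypothetical stand-ins for the test's `BIAS`, `bias_dtype`, and `bias_mem_config`, not the test's actual values. `ttnn.from_torch` folds the old convert/pad/tilize/move chain into a single call; conversion to `TILE_LAYOUT` pads to tile boundaries internally, which is why the explicit `.pad(...)` step disappears from the diff above.

```python
import torch
import ttnn

device = ttnn.open_device(device_id=0)

# Hypothetical stand-in for the test's BIAS tensor.
bias = torch.randn(1, 1, 32, 1024)

# One call converts dtype, tilizes (padding to tile boundaries as needed),
# and places the tensor on device in the requested memory config, replacing
# the old Tensor(...).pad(...).to(TILE_LAYOUT).to(device, ...) chain.
bias_t = ttnn.from_torch(
    bias,
    dtype=ttnn.bfloat16,
    layout=ttnn.TILE_LAYOUT,
    device=device,
    memory_config=ttnn.L1_MEMORY_CONFIG,
)

ttnn.close_device(device)
```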
```diff
@@ -130,10 +130,7 @@ def test_softmax(device, in_dtype, causal_mask, grid_size, seq_len, scale_mask):
     else:
         tt_output_sharded = ttnn.softmax_in_place(in1_t_shard, program_config=program_config)

-    tt_output = ttnn.sharded_to_interleaved(tt_output_sharded, in0_mem_config)
-    tt_output_tensor = tt_output.cpu().to_torch().float()
-    tt_output_tensor = torch.Tensor(tt_output_tensor).reshape(input_shape)
-    tt_output_tensor = untilize(tt_output_tensor)
+    tt_output_tensor = ttnn.to_torch(tt_output_sharded)

     if causal_mask == False:
         attention_mask = attention_mask.reshape(batch, 1, 1, seq_len)
```
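The read-back side is the mirror image: as the diff shows, `ttnn.to_torch` is called on the sharded output directly, so the explicit `sharded_to_interleaved` → `cpu()` → `untilize` → `reshape` pipeline collapses into one call that handles the device-to-host copy, untilization, and removal of tile padding. A minimal round-trip sketch under the same assumptions as the previous example (hypothetical shape and dtype):

```python
import torch
import ttnn

device = ttnn.open_device(device_id=0)

x = torch.randn(1, 1, 64, 64)
x_t = ttnn.from_torch(x, dtype=ttnn.bfloat16, layout=ttnn.TILE_LAYOUT, device=device)

# to_torch performs the device-to-host copy, untilizes, and strips tile
# padding in one call; sharded inputs are accepted without a manual
# interleave step.
x_back = ttnn.to_torch(x_t)
assert x_back.shape == x.shape  # bfloat16 loses some precision; shape is preserved

ttnn.close_device(device)
```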
15 changes: 1 addition & 14 deletions ttnn/cpp/ttnn/tensor/tensor_impl.cpp

```diff
@@ -732,20 +732,7 @@ DeviceBuffer to_device_buffer(
         using StorageType = std::decay_t<decltype(storage)>;
         if constexpr (std::is_same_v<StorageType, OwnedStorage> or std::is_same_v<StorageType, BorrowedStorage>) {
             auto data_to_write = host_buffer::get_as<T>(storage.buffer);
-            auto buffer_size = compute_buffer_size(shape, data_type, tile);
-            TT_ASSERT(
-                buffer_size == data_to_write.size(),
-                "Tensor buffer size and number of data elements does not match: {} != {}",
-                buffer_size,
-                data_to_write.size());
-            if (layout == Layout::TILE) {
-                auto tile_shape = tile.value_or(Tile{{constants::TILE_HEIGHT, constants::TILE_WIDTH}}).get_tile_shape();
-                TT_ASSERT(
-                    (shape[-2] % tile_shape[0] == 0 && shape[-1] % tile_shape[1] == 0),
-                    "Tensor shape incompatible for specified layout");
-            }
-            return initialize_data_on_device<T>(
-                data_to_write, device, shape, data_type, layout, memory_config, shard_spec, tile);
+            return initialize_data_on_device<T>(data_to_write, device, shape, tensor_layout, queue);
         } else if constexpr (std::is_same_v<StorageType, DeviceStorage>) {
             TT_THROW("Device storage doesn't support to_device_buffer");
         } else if constexpr (std::is_same_v<StorageType, MultiDeviceStorage>) {
```
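The deleted C++ block enforced two invariants at the call site: the host data's element count had to match the computed buffer size, and for `TILE` layout the padded height and width had to be tile-multiples. The new signature passes a `tensor_layout` object instead of the loose `data_type`/`layout`/`memory_config`/`tile` parameters, so these checks are presumably subsumed by that object rather than repeated here. A Python restatement of the two invariants, purely to document what moved — hypothetical helper name, 32×32 tiles assumed as in the deleted default:

```python
# Hypothetical restatement of the two invariants the deleted asserts enforced.
import math

TILE_H, TILE_W = 32, 32  # default tile shape, matching the deleted code's fallback

def check_to_device_invariants(shape: tuple[int, ...], num_elements: int, tiled: bool) -> None:
    expected = math.prod(shape)
    # Invariant 1: host data element count must match the computed buffer size.
    assert expected == num_elements, f"buffer size mismatch: {expected} != {num_elements}"
    # Invariant 2: TILE layout requires tile-aligned padded height and width.
    if tiled:
        assert shape[-2] % TILE_H == 0 and shape[-1] % TILE_W == 0, \
            "tensor shape incompatible with TILE layout"

check_to_device_invariants((1, 1, 64, 128), 1 * 1 * 64 * 128, tiled=True)
```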
