Skip to content

Commit

Permalink
#16310: update digamma docs
Browse files Browse the repository at this point in the history
  • Loading branch information
mouliraj-mcw committed Jan 23, 2025
1 parent 78f2c0b commit ac7755a
Show file tree
Hide file tree
Showing 3 changed files with 6 additions and 4 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ def run(
torch.manual_seed(0)

torch_input_tensor = gen_func_with_cast_tt(
partial(torch_random, low=0.0001, high=100, dtype=torch.float32), input_dtype
partial(torch_random, low=1, high=100, dtype=torch.float32), input_dtype
)(input_shape)
golden_function = ttnn.get_golden_function(ttnn.digamma)
torch_output_tensor = golden_function(torch_input_tensor)
Expand All @@ -58,6 +58,7 @@ def run(
layout=input_layout,
device=device,
memory_config=input_memory_config,
pad_value=1,
)

start_time = start_measuring_time()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,7 @@ def run(
)

torch_input_tensor_a = gen_func_with_cast_tt(
partial(torch_random, low=0.0001, high=100, dtype=torch.float32), input_a_dtype
partial(torch_random, low=1, high=100, dtype=torch.float32), input_a_dtype
)(input_shape)
golden_function = ttnn.get_golden_function(ttnn.digamma)
torch_output_tensor = golden_function(torch_input_tensor_a)
Expand All @@ -103,6 +103,7 @@ def run(
layout=input_layout,
device=device,
memory_config=sharded_config,
pad_value=1,
)

start_time = start_measuring_time()
Expand Down
4 changes: 2 additions & 2 deletions ttnn/cpp/ttnn/operations/eltwise/unary/unary_pybind.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -1666,8 +1666,8 @@ void py_module(py::module& module) {
R"doc(System memory is not supported.)doc");
detail::bind_unary_composite(module, ttnn::cbrt, R"doc(Performs cbrt function on :attr:`input_tensor`.)doc");
detail::bind_unary_composite(module, ttnn::cosh, R"doc(Performs cosh function on :attr:`input_tensor`.)doc", "[supported range -9 to 9]", R"doc(BFLOAT16, BFLOAT8_B)doc");
detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported for values greater than 0].",
R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", "", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc");
detail::bind_unary_composite(module, ttnn::digamma, R"doc(Performs digamma function on :attr:`input_tensor`.)doc", "[supported range 1 to inf].",
R"doc(BFLOAT16, BFLOAT8_B)doc", R"doc(TILE)doc", R"doc(2, 3, 4)doc", "For BFLOAT8_B, the padding value must be set to 1 due to BFLOAT8_B limitations. See `BFLOAT8_B <../tensor.html#limitation-of-bfloat8-b>`_ for more information.", R"doc(torch.tensor([[2, 3], [4, 5]], dtype=torch.bfloat16))doc");
detail::bind_unary_composite(module, ttnn::lgamma, R"doc(Performs lgamma function on :attr:`input_tensor`.)doc", "[supported for value greater than 0].", R"doc(BFLOAT16)doc");
detail::bind_unary_composite(module, ttnn::log1p, R"doc(Performs log1p function on :attr:`input_tensor`.)doc", "[supported range -1 to 1].", R"doc(BFLOAT16, BFLOAT8_B)doc");
detail::bind_unary_composite(module, ttnn::mish, R"doc(Performs mish function on :attr:`input_tensor`.)doc", "[supported range -20 to inf].", R"doc(BFLOAT16, BFLOAT8_B)doc",
Expand Down

0 comments on commit ac7755a

Please sign in to comment.