#12349: Enabling pytests on BH that are now working
abhullar-tt committed Oct 26, 2024
1 parent 39f805f commit cac2127
Showing 10 changed files with 17 additions and 12 deletions.
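Most of the diff toggles `@skip_for_blackhole` markers: the decorator is dropped from tests that now pass on Blackhole (BH), added to tests that still fail, and two C++ architecture guards are widened to accept Blackhole. As a rough sketch of how such a marker can be built on pytest's skipif (an assumed implementation for illustration; the real helper in models/utility_functions.py may differ):

```python
# Sketch of a skip_for_blackhole-style decorator (assumed implementation;
# the real helper lives in models/utility_functions.py and may differ).
import os

import pytest


def skip_for_blackhole(reason="Not supported on Blackhole"):
    # Assumption: the target architecture is exposed via the ARCH_NAME env var.
    is_blackhole = os.environ.get("ARCH_NAME", "").lower().startswith("blackhole")
    return pytest.mark.skipif(is_blackhole, reason=reason)
```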
File 1 of 10:

@@ -16,15 +16,14 @@
 from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
     run_single_pytorch_test,
 )
-from models.utility_functions import is_grayskull, skip_for_blackhole
+from models.utility_functions import is_grayskull

 mem_configs = [
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.DRAM),
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.L1),
 ]


-@skip_for_blackhole("Only supported for WH, see #12349")
 @pytest.mark.parametrize("accurate_mode", [False, True])
 @pytest.mark.parametrize("round_mode", ["None", "trunc", "floor"])
 @pytest.mark.parametrize(
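Note how these sweep tests stack several `@pytest.mark.parametrize` markers; pytest expands them into the cross-product of all listed values, so removing one skip decorator re-enables every generated combination. A toy illustration of the pattern (hypothetical test, not from this commit):

```python
# Hypothetical test showing how stacked parametrize markers expand:
# pytest generates the cross-product, here 2 * 3 * 2 = 12 test cases.
import pytest

mem_configs = ["DRAM", "L1"]  # stand-ins for the ttnn.MemoryConfig objects above


@pytest.mark.parametrize("accurate_mode", [False, True])
@pytest.mark.parametrize("round_mode", ["None", "trunc", "floor"])
@pytest.mark.parametrize("dst_mem_config", mem_configs)
def test_cross_product(accurate_mode, round_mode, dst_mem_config):
    assert dst_mem_config in mem_configs
```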
File 2 of 10:

@@ -16,15 +16,14 @@
 from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
     run_single_pytorch_test,
 )
-from models.utility_functions import is_grayskull, skip_for_blackhole
+from models.utility_functions import is_grayskull

 mem_configs = [
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.DRAM),
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.L1),
 ]


-@skip_for_blackhole("Only supported on WH, see #12349")
 @pytest.mark.parametrize("accurate_mode", [True])
 @pytest.mark.parametrize("round_mode", ["None", "trunc", "floor"])
 @pytest.mark.parametrize(
File 3 of 10:

@@ -16,15 +16,14 @@
 from tests.tt_eager.python_api_testing.sweep_tests.run_pytorch_ci_tests import (
     run_single_pytorch_test,
 )
-from models.utility_functions import skip_for_grayskull, skip_for_blackhole
+from models.utility_functions import skip_for_grayskull

 mem_configs = [
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.DRAM),
     ttnn.MemoryConfig(ttnn.TensorMemoryLayout.INTERLEAVED, ttnn.BufferType.L1),
 ]


-@skip_for_blackhole("Only supported on WH, see #12349")
 @pytest.mark.parametrize(
     "input_shapes",
     [
File 4 of 10:

@@ -7,7 +7,7 @@
 import random
 from functools import partial
 import ttnn
-from models.utility_functions import skip_for_grayskull, skip_for_blackhole
+from models.utility_functions import skip_for_grayskull

 from tests.tt_eager.python_api_testing.sweep_tests import (
     comparison_funcs,
@@ -23,7 +23,6 @@
 ]


-@skip_for_blackhole("Unsupported on BH, see #12349")
 @pytest.mark.parametrize(
     "input_shapes",
     [
File 5 of 10:

@@ -469,6 +469,7 @@ def run_test_sdpa_decode_single_iter(
     assert out_pass


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.parametrize(
     "dtype, q_dtype",
@@ -518,6 +519,7 @@ def test_sdpa_decode(
     )


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.parametrize(
     "dtype, q_dtype",
@@ -550,6 +552,7 @@ def test_sdpa_decode_non_causal(device, b, nh, nkv, s, d, dtype, grid_size, q_dt
     assert device.num_program_cache_entries() == 1


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.parametrize(
     "dtype, q_dtype",
@@ -869,6 +872,7 @@ def test_sdpa_decode_paged_attention(
     assert device.num_program_cache_entries() == 4


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.parametrize(
     "dtype, q_dtype",
@@ -898,6 +902,7 @@ def test_sdpa_decode_sharded(device, b, nh, nkv, s, d, dtype, grid_size, q_dtype
     )


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.skip("Skipping Perf Test in CI")
 def test_sdpa_decode_perf(device, use_program_cache):
@@ -952,6 +957,7 @@ def test_sdpa_decode_perf(device, use_program_cache):
     )


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
 @pytest.mark.parametrize(
     "dtype",
@@ -1171,6 +1177,7 @@ def run_test_sdpa_decode_ndpcc(device, b, nh, nkv, s, d, dtype, grid_size, q_dty
     logger.info(f"PCC failed Start Pos: {failed_start_pos}")


+@skip_for_blackhole("Unsupported on BH, see #12349")
 @pytest.mark.timeout(600)
 @pytest.mark.skip("Skipping due to causing 45 minutes timeout on tt eager unit tests")
 @skip_for_grayskull("Unsupported in GS since L1 runs OOM with most configs")
File 6 of 10:

@@ -9,7 +9,6 @@
 from models.utility_functions import skip_for_blackhole


-@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "shape_dim",
     (
File 7 of 10:

@@ -631,6 +631,7 @@ def test_transpose_bfloat8_b(device, shape, swap_dims):
     assert_with_pcc(torch_output, tt_output, 0.9999)


+@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "dtype",
     (ttnn.bfloat16, ttnn.float32),
@@ -649,6 +650,7 @@ def test_transpose_hc(dtype, shape, device):
     transpose(shape, device, dim0=1, dim1=-2, input_dtype=dtype)


+@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "dtype",
     (ttnn.bfloat16, ttnn.float32),
@@ -677,6 +679,7 @@ def test_transpose_2D(dtype, shape, layout, device):
     assert_with_pcc(torch_output, tt_output, 0.9999)


+@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "dtype",
     (ttnn.bfloat16, ttnn.float32),
@@ -758,6 +761,7 @@ def test_transpose_failures(config, device):
     assert_with_pcc(torch_output, tt_output, 0.9999)


+@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "config",
     [
File 8 of 10:

@@ -28,7 +28,6 @@ def get_tensors(input_shape, output_shape, device):
     return tt_input, tt_output, torch_input


-@skip_for_blackhole("Mismatching on BH, see #12349")
 @pytest.mark.parametrize(
     "input_shape",
     (
File 9 of 10:

@@ -280,7 +280,7 @@ Tensor ExecuteBinaryRemainder::invoke(const Tensor& input, float scalar, const s
 // Binary FMOD will be overloaded by unary FMOD in another PR
 Tensor ExecuteBinaryFmod::invoke(const Tensor& input_a, const Tensor& input_b, const std::optional<MemoryConfig>& output_mem_config) {
     auto arch = input_a.device()->arch();
-    TT_FATAL(arch == tt::ARCH::WORMHOLE_B0, "Op is only supported on Wormhole");
+    TT_FATAL(arch == tt::ARCH::WORMHOLE_B0 or arch == tt::ARCH::BLACKHOLE, "Op is only supported on Wormhole or Blackhole");
     DataType input_dtype = input_a.get_dtype();
     Tensor a = typecast(input_a, DataType::FLOAT32);
     Tensor b = typecast(input_b, DataType::FLOAT32);
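The guard above now admits Blackhole alongside Wormhole for binary fmod; the typecasts to FLOAT32 reflect that fmod is computed in floating point. As an illustrative reference for C-style fmod semantics (using PyTorch as the golden model is an assumption of this sketch; the sweep tests have their own comparison functions):

```python
# Illustrative reference for C-style fmod: the result keeps the sign of the
# dividend, i.e. fmod(a, b) = a - b * trunc(a / b).
import torch

a = torch.tensor([5.5, -5.5, 7.0])
b = torch.tensor([2.0, 2.0, -3.0])

expected = a - b * torch.trunc(a / b)
assert torch.allclose(torch.fmod(a, b), expected)  # tensor([ 1.5000, -1.5000,  1.0000])
```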
File 10 of 10:

@@ -817,7 +817,7 @@ Tensor _normalize_global(const Tensor& y, const std::optional<MemoryConfig>& ou

 Tensor _frac(const Tensor& input, const std::optional<MemoryConfig>& output_mem_config) {
     auto arch = input.device()->arch();
-    TT_FATAL(arch == tt::ARCH::WORMHOLE_B0, "Op is only supported on Wormhole");
+    TT_FATAL(arch == tt::ARCH::WORMHOLE_B0 or arch == tt::ARCH::BLACKHOLE, "Op is only supported on Wormhole or Blackhole");
     Tensor trunc_res = ttnn::trunc(input);
     Tensor result = ttnn::subtract(input, trunc_res, std::nullopt, output_mem_config);
     return result;
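The `_frac` composite computes the fractional part as the input minus its truncation, so the result keeps the input's sign. A quick PyTorch check of that identity (illustrative only; tensor values are arbitrary):

```python
# frac(x) = x - trunc(x), mirroring the composite above; the fractional part
# keeps the sign of the input.
import torch

x = torch.tensor([2.75, -2.75, 0.5])
assert torch.allclose(x - torch.trunc(x), torch.frac(x))  # tensor([ 0.7500, -0.7500,  0.5000])
```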
