Skip to content

Commit

Permalink
adjust cache size
Browse files — browse the repository at this point in the history
Signed-off-by: Anatoly Myachev <[email protected]>
  • Loading branch information
anmyachev committed Sep 20, 2024
1 parent 49fc392 commit a59c94e
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 2 deletions.
5 changes: 4 additions & 1 deletion benchmarks/triton_kernels_benchmark/benchmark_testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,10 @@ def do_bench_ipex(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fas
# We maintain a buffer of 256 MB that we clear
# before each kernel call to make sure that the L2
# doesn't contain any input data before the run
cache_size = 256 * 1024 * 1024
factor = 1
if os.getenv("ZE_FLAT_DEVICE_HIERARCHY", "FLAT") == "COMPOSITE":
factor = 2
cache_size = factor * 256 * 1024 * 1024
if fast_flush:
cache = torch.empty(int(cache_size // 4), dtype=torch.int, device=device)
else:
Expand Down
5 changes: 4 additions & 1 deletion python/triton/testing.py
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,10 @@ def do_bench(fn, warmup=25, rep=100, grad_to_none=None, quantiles=None, fast_flu
# We maintain a buffer of 256 MB that we clear
# before each kernel call to make sure that the L2 cache
# doesn't contain any input data before the run
cache_size = 256 * 1024 * 1024
factor = 1
if os.getenv("ZE_FLAT_DEVICE_HIERARCHY", "FLAT") == "COMPOSITE":
factor = 2
cache_size = factor * 256 * 1024 * 1024
if fast_flush:
cache = torch.empty(int(cache_size // 4), dtype=torch.int, device=device_type)
else:
Expand Down

0 comments on commit a59c94e

Please sign in to comment.