Skip to content

Commit f5ff1b5

Browse files
q10 authored and facebook-github-bot committed
Migrate TBE benchmark utilities over to TBE, pt 3 (pytorch#3785)
Summary: X-link: facebookresearch/FBGEMM#870 Pull Request resolved: pytorch#3785 - Migrate TBE benchmark utilities over to TBE, pt 3 Reviewed By: jianyuh Differential Revision: D70745336
1 parent 8d2f925 commit f5ff1b5

File tree

6 files changed

+22
-18
lines changed

6 files changed

+22
-18
lines changed

fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py

Lines changed: 1 addition & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@
4949
)
5050
from fbgemm_gpu.tbe.bench import (
5151
bench_warmup,
52+
benchmark_cpu_requests,
5253
benchmark_eval_compression,
5354
benchmark_pipelined_requests,
5455
benchmark_requests,
@@ -978,24 +979,6 @@ def cache( # noqa C901
978979
)
979980

980981

981-
def benchmark_cpu_requests(
982-
requests: List[TBERequest],
983-
func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],
984-
num_warmups: int = 0,
985-
) -> float:
986-
import time
987-
988-
if num_warmups > 0:
989-
for _ in range(num_warmups):
990-
func(*(requests[0].unpack_3()))
991-
992-
start_time = time.perf_counter()
993-
for req in requests:
994-
func(*(req.unpack_3()))
995-
end_time = time.perf_counter()
996-
return (end_time - start_time) / len(requests)
997-
998-
999982
@cli.command()
1000983
@click.option("--alpha", default=1.0)
1001984
@click.option("--bag-size", default=20)

fbgemm_gpu/fbgemm_gpu/tbe/bench/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
)
1414
from .bench_runs import ( # noqa F401
1515
bench_warmup,
16+
benchmark_cpu_requests,
1617
benchmark_pipelined_requests,
1718
benchmark_requests,
1819
benchmark_requests_refer,

fbgemm_gpu/fbgemm_gpu/tbe/bench/bench_runs.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,9 @@
1212
from typing import Callable, List, Optional, Tuple
1313

1414
import torch
15+
1516
from fbgemm_gpu.tbe.utils import b_indices, TBERequest # noqa: F401
17+
from torch import Tensor
1618

1719
logging.basicConfig(level=logging.DEBUG)
1820

@@ -39,6 +41,24 @@ def bench_warmup(
3941
out.backward(grad)
4042

4143

44+
def benchmark_cpu_requests(
45+
requests: List[TBERequest],
46+
func: Callable[[Tensor, Tensor, Optional[Tensor]], Tensor],
47+
num_warmups: int = 0,
48+
) -> float:
49+
import time
50+
51+
if num_warmups > 0:
52+
for _ in range(num_warmups):
53+
func(*(requests[0].unpack_3()))
54+
55+
start_time = time.perf_counter()
56+
for req in requests:
57+
func(*(req.unpack_3()))
58+
end_time = time.perf_counter()
59+
return (end_time - start_time) / len(requests)
60+
61+
4262
def benchmark_requests( # noqa: C901
4363
requests: List[TBERequest],
4464
func: Callable[[torch.Tensor, torch.Tensor, Optional[torch.Tensor]], torch.Tensor],

0 commit comments

Comments (0)