
Commit 253481f

q10 authored and facebook-github-bot committed
Fold ops registration code, pt 1 (pytorch#3634)
Summary:
X-link: facebookresearch/FBGEMM#710

- Fold ops registration code into a single location

Differential Revision: D68850095
1 parent 54e83db commit 253481f

File tree

3 files changed: +163 -56 lines changed
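For orientation, this is the consolidated registration pattern the commit introduces, shown as a minimal sketch. Only TorchLibraryFragment, define, and register come from the diffs below; the operator name my_op and the function my_op_impl are hypothetical placeholders, not part of this commit.

import torch

from fbgemm_gpu.utils import TorchLibraryFragment

# Hypothetical implementation shared by CPU and CUDA (placeholder only).
def my_op_impl(input: torch.Tensor, group_size: int) -> torch.Tensor:
    return input

# One fragment object per namespace; definitions and registrations are
# deduplicated internally, so this module can safely be imported repeatedly.
lib = TorchLibraryFragment("fbgemm")
lib.define("my_op(Tensor input, int group_size) -> Tensor")
lib.register("my_op", {"CPU": my_op_impl, "CUDA": my_op_impl})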

fbgemm_gpu/fbgemm_gpu/quantize/__init__.py

Lines changed: 29 additions & 56 deletions

@@ -6,65 +6,38 @@
 
 # pyre-strict
 
-import torch
-
 from fbgemm_gpu.quantize.quantize_ops import dequantize_mx, quantize_mx  # noqa F401
-
-
-def op_registeration(
-    lib,  # pyre-ignore[2]
-    op_name,  # pyre-ignore[2]
-    fn,  # pyre-ignore[2]
-    dispatch_key,  # pyre-ignore[2]
-) -> None:
+from fbgemm_gpu.utils import TorchLibraryFragment
+
+lib = TorchLibraryFragment("fbgemm")
+
+lib.define(
+    """quantize_mx(
+        Tensor input,
+        int scale_bits,
+        int elem_ebits,
+        int elem_mbits,
+        float elem_max_norm,
+        int mx_group_size,
+        int? rounding_mode = None
+    ) -> Tensor
     """
-    Registers an op with the given name and dispatch key only once.
-
-    Args:
-        lib: torch.library (e.g., torch.library.Library("fbgemm", "FRAGMENT"))
-        op_name: operator name
-        fn: function that's the operator implementation for the input dispatch key
-        dispatch_key: dispatch key that the function should be registered for (e.g., "CUDA")
+)
 
-    Returns:
-        None
-
-    Example:
-        lib = torch.library.Library("fbgemm", "FRAGMENT")
-        lib.define(...)
-        op_registeration(lib, "quantize_mx", quantize_mx, "CUDA")
+lib.define(
+    """dequantize_mx(
+        Tensor input,
+        int mx_group_size
+    ) -> Tensor
     """
-    full_op_name = "fbgemm::" + op_name
-    if not torch._C._dispatch_has_kernel_for_dispatch_key(full_op_name, dispatch_key):
-        lib.impl(op_name, fn, dispatch_key)
-
-
-lib = torch.library.Library("fbgemm", "FRAGMENT")
-
-if "fbgemm::quantize_mx" not in torch.library._defs:
-    lib.define(
-        """quantize_mx(
-            Tensor input,
-            int scale_bits,
-            int elem_ebits,
-            int elem_mbits,
-            float elem_max_norm,
-            int mx_group_size,
-            int? rounding_mode = None
-        ) -> Tensor
-        """
-    )
+)
 
-if "fbgemm::dequantize_mx" not in torch.library._defs:
-    lib.define(
-        """dequantize_mx(
-            Tensor input,
-            int mx_group_size
-        ) -> Tensor
-        """
-    )
+lib.register(
+    "quantize_mx",
+    {"CUDA": quantize_mx, "CPU": quantize_mx},
+)
 
-op_registeration(lib, "quantize_mx", quantize_mx, "CUDA")
-op_registeration(lib, "quantize_mx", quantize_mx, "CPU")
-op_registeration(lib, "dequantize_mx", dequantize_mx, "CUDA")
-op_registeration(lib, "dequantize_mx", dequantize_mx, "CPU")
+lib.register(
+    "dequantize_mx",
+    {"CUDA": dequantize_mx, "CPU": dequantize_mx},
+)

fbgemm_gpu/fbgemm_gpu/utils/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -6,3 +6,5 @@
 # LICENSE file in the root directory of this source tree.
 
 # pyre-unsafe
+
+from fbgemm_gpu.utils.torch_library import TorchLibraryFragment  # noqa F401
fbgemm_gpu/fbgemm_gpu/utils/torch_library.py

Lines changed: 132 additions & 0 deletions

@@ -0,0 +1,132 @@
+#!/usr/bin/env python3
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+# pyre-strict
+
+import re
+from typing import Callable, Dict
+
+import torch
+
+
+class TorchLibraryFragment:
+    """
+    A wrapper class around PyTorch library fragments, which are used to define
+    and register PyTorch operators. Handles duplicate operator definitions and
+    registrations under the hood.
+    """
+
+    def __init__(self, namespace: str) -> None:
+        """
+        Constructs the TorchLibraryFragment class.
+
+        Args:
+            namespace: The namespace for the operators.
+
+        Returns:
+            None
+
+        Example:
+            lib = TorchLibraryFragment("fbgemm")
+        """
+        self.namespace = namespace
+        self.lib = torch.library.Library(namespace, "FRAGMENT")
+
+    def define(self, schema: str) -> None:
+        """
+        Defines an operator schema. This function handles the case where the
+        operator name has already been defined.
+
+        Args:
+            schema: The schema of the operator to be defined. The operator name
+            should NOT be prefixed with the operator namespace.
+
+        Returns:
+            None
+
+        Example:
+            lib = TorchLibraryFragment("fbgemm")
+            lib.define("sll_jagged_jagged_bmm(Tensor x, Tensor y, bool flag=True) -> Tensor")
+        """
+        pattern = re.compile(
+            r"""
+            (\w+)       # Match the function name (capturing group)
+            \s*\(       # Match the opening parenthesis with optional whitespace
+            ([^)]*)     # Match params list (capturing group)
+            \s*\)       # Match the closing parenthesis with optional whitespace
+            \s*->\s*.+  # Match '-> <Return Type>'
+            """,
+            re.VERBOSE,
+        )
+
+        match = pattern.search(schema.strip())
+        if match:
+            name = match.group(1)
+            if f"{self.namespace}::{name}" not in torch.library._defs:
+                self.lib.define(schema)
+        else:
+            raise ValueError(
+                f"PyTorch operator schema appears to be ill-defined: '''{schema}'''"
+            )
+
+    # pyre-ignore[24]
+    def register_dispatch(self, op_name: str, dispatch_key: str, fn: Callable) -> None:
+        """
+        Registers a single dispatch for an operator with the given name and dispatch key.
+
+        Args:
+            op_name: operator name
+            dispatch_key: dispatch key that the function should be registered for (e.g., "CUDA")
+            fn: a function that is the operator implementation for the input dispatch key
+
+        Returns:
+            None
+
+        Example:
+            lib = TorchLibraryFragment("fbgemm")
+            lib.define(...)
+            lib.register_dispatch("jagged_dense_bmm", "CUDA", jagged_dense_bmm)
+        """
+
+        valid_backends = [
+            "CUDA",
+            "AutogradCUDA",
+            "CPU",
+            "AutogradCPU",
+            "AutogradMeta",
+            "Meta",
+            "CompositeImplicitAutograd",
+        ]
+        assert dispatch_key in valid_backends
+
+        if not torch._C._dispatch_has_kernel_for_dispatch_key(
+            f"{self.namespace}::{op_name}", dispatch_key
+        ):
+            if dispatch_key == "Meta":
+                self.lib._register_fake(op_name, fn)
+            else:
+                self.lib.impl(op_name, fn, dispatch_key)
+
+    # pyre-ignore[24]
+    def register(self, op_name: str, functors: Dict[str, Callable]) -> None:
+        """
+        Registers a set of dispatches for a defined operator.
+
+        Args:
+            op_name: operator name
+            functors: A dictionary of dispatch keys to dispatch implementations
+
+        Returns:
+            None
+
+        Example:
+            lib = TorchLibraryFragment("fbgemm")
+            lib.define(...)
+            lib.register("jagged_dense_bmm", {"CUDA": jagged_dense_bmm, "Meta": jagged_dense_bmm_meta})
+        """
+        for dispatch, func in functors.items():
+            self.register_dispatch(op_name, dispatch, func)
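A minimal usage sketch of the class added above, including a "Meta" (fake) kernel. The operator toy_op and both implementations are hypothetical placeholders, not part of this commit; only the TorchLibraryFragment API comes from the diff.

import torch

from fbgemm_gpu.utils import TorchLibraryFragment

# Hypothetical real implementation (placeholder only).
def toy_op(x: torch.Tensor) -> torch.Tensor:
    return x * 2

# Hypothetical shape-only kernel; register_dispatch routes the "Meta"
# key through Library._register_fake instead of Library.impl.
def toy_op_meta(x: torch.Tensor) -> torch.Tensor:
    return torch.empty_like(x)

lib = TorchLibraryFragment("fbgemm")
lib.define("toy_op(Tensor x) -> Tensor")
lib.register("toy_op", {"CPU": toy_op, "CUDA": toy_op, "Meta": toy_op_meta})

# Repeating a definition is a silent no-op (checked against
# torch.library._defs), which is what makes folding all of the
# registration code into one place safe.
lib.define("toy_op(Tensor x) -> Tensor")

print(torch.ops.fbgemm.toy_op(torch.ones(3)))  # tensor([2., 2., 2.])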
