Commit 519be05

airidas-meta authored and facebook-github-bot committed
add lint to oss (pytorch#1427)
Summary:
Pull Request resolved: pytorch#1427
Pull Request resolved: pytorch#945
As title. This diff will be landed after the previous diff has propagated to production. Please feel free to accept; I will monitor the production package and land this later to avoid a BC-breaking issue.
Differential Revision: D40620055
fbshipit-source-id: e5706bf11fb3056a988e08ccbb740947bfc9dc9b
1 parent: 79f2073 · commit: 519be05

17 files changed: +231 −47 lines

.github/workflows/pylint.yaml

Lines changed: 42 additions & 0 deletions
@@ -0,0 +1,42 @@
+name: Lint
+
+on:
+  push:
+    branches:
+      - main
+
+  pull_request:
+    branches:
+      - main
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.8"]
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v2
+        with:
+          python-version: ${{ matrix.python-version }}
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install ufmt
+          pip install click
+          pip install flake8
+      - name: Analyzing the code with flake8
+        run: |
+          echo "::add-matcher::fbgemm_gpu/test/lint/flake8_problem_matcher.json"
+          flake8 --ignore=E501,W503,E203 .  # Ignore "line too long" (E501)
+      - name: Analyzing the code with ufmt
+        run: |
+          ufmt diff fbgemm_gpu/fbgemm_gpu
+          ufmt diff fbgemm_gpu/test
+          ufmt diff fbgemm_gpu/bench
+      - name: Check Meta copyright header
+        run: |
+          python fbgemm_gpu/test/lint/check_meta_header.py --path=./fbgemm_gpu/fbgemm_gpu --fixit=False
+          python fbgemm_gpu/test/lint/check_meta_header.py --path=./fbgemm_gpu/test --fixit=False
+          python fbgemm_gpu/test/lint/check_meta_header.py --path=./fbgemm_gpu/bench --fixit=False
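
Reviewer note: the three steps above (flake8, ufmt, header check) can be replayed locally before pushing. Below is a minimal sketch of such a runner; the script itself is hypothetical and not part of this commit, but the commands and flags mirror the workflow verbatim.

import subprocess
import sys

# Hypothetical local runner (not part of this commit): replays the same
# checks as .github/workflows/pylint.yaml from the repository root.
CHECKS = [
    # E501 (line too long), W503 (line break before binary operator), and
    # E203 (whitespace before ':') are ignored, as in the workflow.
    ["flake8", "--ignore=E501,W503,E203", "."],
    ["ufmt", "diff", "fbgemm_gpu/fbgemm_gpu"],
    ["ufmt", "diff", "fbgemm_gpu/test"],
    ["ufmt", "diff", "fbgemm_gpu/bench"],
    ["python", "fbgemm_gpu/test/lint/check_meta_header.py",
     "--path=./fbgemm_gpu/fbgemm_gpu", "--fixit=False"],
]

def main() -> int:
    rc = 0
    for cmd in CHECKS:
        print("$", " ".join(cmd))
        rc |= subprocess.run(cmd).returncode  # non-zero once any check fails
    return rc

if __name__ == "__main__":
    sys.exit(main())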

fbgemm_gpu/bench/bench_utils.py

Lines changed: 5 additions & 2 deletions
@@ -3,7 +3,6 @@
 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 
-import itertools
 import logging
 import statistics
 import time

@@ -344,6 +343,7 @@ def benchmark_requests_refer(
     check_median: bool = False,
 ) -> float:
     do_pooling = pooling_mode in ["sum", "mean"]
+
     if do_pooling:
         nn_embedding_list = [
             torch.nn.EmbeddingBag(E, D, mode=pooling_mode, sparse=True).cuda()

@@ -397,12 +397,15 @@ def benchmark_requests_refer(
             )
         ]
     )
+
     if do_pooling:
         final_output = torch.cat(
             [f.view(B, -1) for f in nn_embedding_output], dim=1
         )
     else:
-        final_output = torch.cat(nn_embedding_output, dim=0).view(-1, D)
+        final_output = torch.cat(nn_embedding_output, dim=0).view(
+            -1, D
+        )  # noqa: F841
 
     if torch.cuda.is_available():
         end_event.record()
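
Reviewer note: the # noqa: F841 pragma is needed because flake8's F841 check flags a local that is assigned but never read, and the reference benchmark builds final_output only to force the computation for timing, not to use the result. A minimal illustration (function and argument names are hypothetical):

import torch

def reference_forward(nn_embedding_output: list, D: int) -> None:
    # Assigned purely so the concatenation is materialized for timing;
    # without the pragma, flake8 reports "F841 local variable
    # 'final_output' is assigned to but never used".
    final_output = torch.cat(nn_embedding_output, dim=0).view(-1, D)  # noqa: F841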

fbgemm_gpu/bench/merge_embeddings_benchmark.py

Lines changed: 9 additions & 10 deletions
@@ -17,6 +17,14 @@
 import tabulate
 import torch
 
+from fbgemm_gpu.split_table_batched_embeddings_ops import (
+    BoundsCheckMode,
+    EmbeddingLocation,
+    IntNBitTableBatchedEmbeddingBagsCodegen,
+    SparseType,
+)
+from torch.profiler import profile, ProfilerActivity
+
 # pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
 open_source: bool = getattr(fbgemm_gpu, "open_source", False)

@@ -32,15 +40,6 @@
 )
 
 
-from fbgemm_gpu.split_table_batched_embeddings_ops import (
-    BoundsCheckMode,
-    EmbeddingLocation,
-    IntNBitTableBatchedEmbeddingBagsCodegen,
-    SparseType,
-)
-from torch.profiler import profile, ProfilerActivity
-
-
 def get_gpu_device(gpu_num) -> torch.device:
     return torch.device(f"cuda:{gpu_num}")

@@ -72,7 +71,7 @@ def generate_requests(
     E: int,
     # inter-batch indices reuse rate
    reuse: float = 0.0,
-) -> List[Tuple[torch.IntTensor, torch.IntTensor,]]:
+) -> List[Tuple[torch.IntTensor, torch.IntTensor, None]]:
     rs = []
     for gpu_num in range(num_gpus):
         all_indices = torch.randint(
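
Reviewer note on the return-type fix: Tuple[torch.IntTensor, torch.IntTensor,] with a trailing comma still denotes a 2-tuple, so the old annotation under-counted the elements each request actually carries. Judging from the corrected annotation, the third slot is always None (presumably a per-sample-weights placeholder; that interpretation is an assumption). A minimal sketch:

from typing import List, Tuple
import torch

def make_requests() -> List[Tuple[torch.IntTensor, torch.IntTensor, None]]:
    indices = torch.randint(0, 100, (8,), dtype=torch.int32)
    offsets = torch.tensor([0, 4, 8], dtype=torch.int32)
    # The trailing None matches the corrected Tuple[..., None] annotation.
    return [(indices, offsets, None)]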

fbgemm_gpu/bench/split_table_batched_embeddings_benchmark.py

Lines changed: 10 additions & 10 deletions
@@ -19,15 +19,6 @@
 import fbgemm_gpu
 import numpy as np
 import torch
-
-haveAIBench = False
-try:
-    from aibench_observer.utils.observer import emitMetric
-
-    haveAIBench = True
-except Exception:
-    haveAIBench = False
-
 from fbgemm_gpu.split_table_batched_embeddings_ops import (
     BoundsCheckMode,
     CacheAlgorithm,

@@ -44,6 +35,15 @@
 )
 from torch import Tensor
 
+haveAIBench = False
+try:
+    from aibench_observer.utils.observer import emitMetric
+
+    haveAIBench = True
+except Exception:
+    haveAIBench = False
+
+
 # pyre-fixme[16]: Module `fbgemm_gpu` has no attribute `open_source`.
 open_source: bool = getattr(fbgemm_gpu, "open_source", False)

@@ -1773,7 +1773,7 @@ def nbit_cache(  # noqa C901
         * B
         * sum(Ds)
         * L
-        # output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L
+        + output_size_multiplier * B * sum(Ds) + param_size_multiplier * B * sum(Ds) * L
     )
     logging.info(
         f"{weights_precision} Embedding tables: {E * T} rows, {nparams_byte / param_size_multiplier / 1.0e9: .2f} GParam, "

fbgemm_gpu/fbgemm_gpu/__init__.py

Lines changed: 1 addition & 1 deletion
@@ -19,4 +19,4 @@
 # Use existence to check if fbgemm_gpu_py.so has already been loaded
 open_source: bool = True
 
-from . import _fbgemm_gpu_docs
+# from . import _fbgemm_gpu_docs

fbgemm_gpu/fbgemm_gpu/_fbgemm_gpu_docs.py

Lines changed: 9 additions & 9 deletions
@@ -5,7 +5,7 @@
 
 import fbgemm_gpu
 import fbgemm_gpu.split_table_batched_embeddings_ops
-import torch
+import torch  # usort:skip
 
 Tensor = torch.Tensor
 

The six remaining hunks in this file (@@ -85,12 +85,12 @@, @@ -119,7 +119,7 @@, @@ -147,13 +147,13 @@, @@ -174,7 +174,7 @@, @@ -195,7 +195,7 @@, @@ -218,7 +218,7 @@) are whitespace-only: each removes and re-adds docstring lines such as "Converts a dense tensor into a jagged tensor, given the desired offsets of the resulting dense tensor." and "x_offsets (Tensor[]): A list of jagged offset tensors, one for each jagged dimension." with identical visible text, stripping trailing whitespace.
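
Reviewer note: the # usort:skip pragmas added in this and the following files target usort, the import sorter that ufmt runs alongside black; the pragma excludes that one import statement from re-sorting so its position survives the formatter. The diff does not say why the order matters, but a plausible reason (an assumption on my part) is that importing the fbgemm_gpu modules has load-order side effects such as registering custom torch ops. A minimal sketch:

# usort (the import sorter inside ufmt) would normally reorder imports;
# the pragma below pins this statement in place.
import fbgemm_gpu.split_table_batched_embeddings_ops
import torch  # usort:skip
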
Lines changed: 6 additions & 0 deletions
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
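
Reviewer note: the six lines above are exactly the header that the workflow's last step enforces via check_meta_header.py. For context, a hypothetical sketch of such a checker (the real fbgemm_gpu/test/lint/check_meta_header.py may be implemented differently):

import argparse
import pathlib
import sys

HEADER = "# Copyright (c) Meta Platforms, Inc. and affiliates."

def main() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", required=True)
    parser.add_argument("--fixit", default="False")  # report-only when False
    args = parser.parse_args()

    missing = [
        str(f)
        for f in pathlib.Path(args.path).rglob("*.py")
        if HEADER not in f.read_text()
    ]
    for name in missing:
        print(f"Missing Meta copyright header: {name}")
    return 1 if missing else 0  # non-zero exit fails the CI step

if __name__ == "__main__":
    sys.exit(main())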

fbgemm_gpu/fbgemm_gpu/enums.py

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
 import typing
 from typing import Any, Callable, List, Tuple
 
+
 # Create enums in given namespace with information from query_op
 def create_enums(
     namespace: typing.Dict[str, Any],

fbgemm_gpu/fbgemm_gpu/quantize_comm.py

Lines changed: 2 additions & 1 deletion
@@ -24,7 +24,8 @@
     hfp8_to_fp32,
 )
 from fbgemm_gpu.split_embedding_configs import SparseType
-from torch.autograd.profiler import record_function
+from torch.autograd.profiler import record_function  # usort:skip
+
 
 logger: logging.Logger = logging.getLogger()

fbgemm_gpu/fbgemm_gpu/split_embedding_inference_converter.py

Lines changed: 4 additions & 2 deletions
@@ -11,11 +11,13 @@
 import math
 from typing import Optional, Tuple
 
-import fbgemm_gpu.split_table_batched_embeddings_ops as split_table_batched_embeddings_ops
+import fbgemm_gpu.split_table_batched_embeddings_ops as split_table_batched_embeddings_ops  # usort:skip
 import numpy as np
 import torch
+
 from fbgemm_gpu.split_embedding_configs import QuantizationConfig, SparseType
-from torch import nn, Tensor
+from torch import nn, Tensor  # usort:skip
+
 
 # TODO: add per-feature based converter option (based on embedding_specs during inference)
 # TODO: optimize embedding pruning and quantization latency.

fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py

Lines changed: 2 additions & 2 deletions
@@ -15,9 +15,9 @@
 from typing import Dict, List, NamedTuple, Optional, Tuple, Type, Union
 
 import fbgemm_gpu.split_embedding_codegen_lookup_invokers as invokers
-import torch
+import torch  # usort:skip
 from fbgemm_gpu.split_embedding_configs import EmbOptimType as OptimType, SparseType
-from torch import nn, Tensor
+from torch import nn, Tensor  # usort:skip
 
 DEFAULT_ASSOC = 32 if torch.version.hip is None else 64
 # Maximum number of times prefetch() can be called without

fbgemm_gpu/fbgemm_gpu/uvm.py

Lines changed: 1 addition & 0 deletions
@@ -9,6 +9,7 @@
 from typing import Optional
 
 import torch
+
 from fbgemm_gpu.enums import create_enums
 
 try:
