Skip to content

Commit 180da0e

Browse files
yinfan98 and jianan-gu
authored and committed
[Misc] Use pytest.mark.skipif in sgl-kernel test (sgl-project#5137)
1 parent 0fa1fe9 commit 180da0e

File tree

3 files changed

+29
-11
lines changed

3 files changed

+29
-11
lines changed

sgl-kernel/README.md

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,10 +158,19 @@ python -m uv build --wheel -Cbuild-dir=build --color=always .
158158

159159
### Testing & Benchmarking
160160

161-
1. Add pytest tests in [tests/](https://github.com/sgl-project/sglang/tree/main/sgl-kernel/tests)
161+
1. Add pytest tests in [tests/](https://github.com/sgl-project/sglang/tree/main/sgl-kernel/tests), if you need to skip some test, please use `@pytest.mark.skipif`
162+
163+
```python
164+
@pytest.mark.skipif(
165+
skip_condition, reason="Nvfp4 Requires compute capability of 10 or above."
166+
)
167+
```
168+
162169
2. Add benchmarks using [triton benchmark](https://triton-lang.org/main/python-api/generated/triton.testing.Benchmark.html) in [benchmark/](https://github.com/sgl-project/sglang/tree/main/sgl-kernel/benchmark)
163170
3. Run test suite
164171

172+
173+
165174
### Release new version
166175

167176
Update version in [pyproject.toml](https://github.com/sgl-project/sglang/blob/main/sgl-kernel/pyproject.toml) and [version.py](https://github.com/sgl-project/sglang/blob/main/sgl-kernel/python/sgl_kernel/version.py)

sgl-kernel/tests/test_fp4_gemm.py

Lines changed: 8 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,7 @@
22
import torch
33
from sgl_kernel import cutlass_scaled_fp4_mm, scaled_fp4_quant
44

5-
if torch.cuda.get_device_capability() < (10, 0):
6-
pytest.skip(
7-
reason="Nvfp4 Requires compute capability of 10 or above.",
8-
allow_module_level=True,
9-
)
5+
skip_condition = torch.cuda.get_device_capability() < (10, 0)
106

117
DTYPES = [torch.float16, torch.bfloat16]
128
# m, n, k
@@ -108,6 +104,9 @@ def get_ref_results(
108104
return torch.matmul(a_in_dtype, b_in_dtype.t())
109105

110106

107+
@pytest.mark.skipif(
108+
skip_condition, reason="Nvfp4 Requires compute capability of 10 or above."
109+
)
111110
@pytest.mark.parametrize("dtype", DTYPES)
112111
@pytest.mark.parametrize("shape", SHAPES)
113112
@torch.inference_mode()
@@ -149,3 +148,7 @@ def test_nvfp4_gemm(
149148
)
150149

151150
torch.testing.assert_close(out, expected_out.to(dtype=dtype), atol=1e-1, rtol=1e-1)
151+
152+
153+
if __name__ == "__main__":
154+
pytest.main([__file__])

sgl-kernel/tests/test_fp4_quantize.py

Lines changed: 11 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -2,11 +2,7 @@
22
import torch
33
from sgl_kernel import scaled_fp4_quant
44

5-
if torch.cuda.get_device_capability() < (10, 0):
6-
pytest.skip(
7-
reason="Nvfp4 Requires compute capability of 10 or above.",
8-
allow_module_level=True,
9-
)
5+
skip_condition = torch.cuda.get_device_capability() < (10, 0)
106

117
DTYPES = [torch.float16, torch.bfloat16]
128
SHAPES = [(128, 64), (128, 128), (256, 64), (256, 128)]
@@ -115,6 +111,9 @@ def recover_swizzled_scales(scale, m, n):
115111
return result[:m, :scale_n]
116112

117113

114+
@pytest.mark.skipif(
115+
skip_condition, reason="Nvfp4 Requires compute capability of 10 or above."
116+
)
118117
@pytest.mark.parametrize("dtype", DTYPES)
119118
@pytest.mark.parametrize("shape", SHAPES)
120119
@torch.inference_mode()
@@ -140,6 +139,9 @@ def test_quantize_to_fp4(
140139
torch.testing.assert_close(scale_ans, scale_ref)
141140

142141

142+
@pytest.mark.skipif(
143+
skip_condition, reason="Nvfp4 Requires compute capability of 10 or above."
144+
)
143145
@pytest.mark.parametrize("pad_shape", PAD_SHAPES)
144146
@torch.inference_mode()
145147
def test_quantize_to_fp4_padded(pad_shape: tuple[int, int]) -> None:
@@ -162,3 +164,7 @@ def test_quantize_to_fp4_padded(pad_shape: tuple[int, int]) -> None:
162164

163165
torch.testing.assert_close(out_ans, out_ref)
164166
torch.testing.assert_close(scale_ans, scale_ref)
167+
168+
169+
if __name__ == "__main__":
170+
pytest.main([__file__])

0 commit comments

Comments (0)