Commit e3a5304

Add AMD MI300x Nightly Testing. (#5861)

1 parent 28b26db

File tree

3 files changed: +249 -0 lines changed
New workflow file: Nightly Test (AMD)

Lines changed: 54 additions & 0 deletions

```yaml
name: Nightly Test (AMD)

on:
  schedule:
    - cron: '0 0 * * *'
  push:
    branches:
      - main
    paths:
      - "python/sglang/version.py"
  workflow_dispatch:

concurrency:
  group: nightly-test-${{ github.ref }}
  cancel-in-progress: true

jobs:
  nightly-test:
    if: github.repository == 'sgl-project/sglang' || github.event_name == 'pull_request'
    runs-on: linux-mi300-gpu-2
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Setup docker
        run: |
          # Ensure GPU isolation if pod is part of kubernetes setup with DEVICE_FLAG.
          if [ -f "/etc/podinfo/gha-render-devices" ]; then
            DEVICE_FLAG=$(cat /etc/podinfo/gha-render-devices)
          else
            DEVICE_FLAG="--device /dev/dri"
          fi
          touch github_summary.md
          docker pull ghcr.io/saienduri/sglang-aiter-v0.1.1:428
          docker run -dt --user root --device=/dev/kfd $DEVICE_FLAG \
            -v ${{ github.workspace }}:/sglang-checkout --ipc=host --group-add video \
            --cap-add=SYS_PTRACE -e HF_TOKEN=${HF_TOKEN} --security-opt seccomp=unconfined \
            -w /sglang-checkout --name ci_sglang \
            ghcr.io/saienduri/sglang-aiter-v0.1.1:428

      - name: Install dependencies
        run: |
          docker exec ci_sglang pip install --upgrade pip
          docker exec ci_sglang pip uninstall sgl-kernel -y || true
          docker exec -w /sglang-checkout/sgl-kernel ci_sglang bash -c "rm -f pyproject.toml && mv pyproject_rocm.toml pyproject.toml && python3 setup_rocm.py install"
          docker exec ci_sglang pip install -e "python[dev_hip]"

          docker exec -w / ci_sglang git clone https://github.com/merrymercy/human-eval.git
          docker exec -w /human-eval ci_sglang pip install -e .

      - name: Nightly Test
        run: |
          docker exec -w /sglang-checkout/test/srt -e SGLANG_IS_IN_CI=1 -e GITHUB_STEP_SUMMARY="/sglang-checkout/github_summary.md" ci_sglang python3 run_suite.py --suite nightly-amd --timeout-per-file 7200
          echo "$(<github_summary.md )" >> $GITHUB_STEP_SUMMARY
```

test/srt/run_suite.py

Lines changed: 3 additions & 0 deletions

```diff
@@ -100,6 +100,9 @@ class TestFile:
     "nightly": [
         TestFile("test_nightly_gsm8k_eval.py"),
     ],
+    "nightly-amd": [
+        TestFile("test_nightly_gsm8k_eval_amd.py"),
+    ],
     "vllm_dependency_test": [
         TestFile("test_vllm_dependency.py"),
         TestFile("test_awq.py"),
```
test/srt/test_nightly_gsm8k_eval_amd.py (new file)

Lines changed: 192 additions & 0 deletions

```python
import json
import os
import unittest
import warnings
from datetime import datetime
from types import SimpleNamespace

from sglang.srt.utils import kill_process_tree
from sglang.test.run_eval import run_eval
from sglang.test.test_utils import (
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1,
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    is_in_ci,
    popen_launch_server,
    write_github_step_summary,
)

MODEL_SCORE_THRESHOLDS = {
    "meta-llama/Llama-3.1-8B-Instruct": 0.82,
    "mistralai/Mistral-7B-Instruct-v0.3": 0.56,
    "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct": 0.85,
    "meta-llama/Llama-3.1-70B-Instruct": 0.95,
    "mistralai/Mixtral-8x7B-Instruct-v0.1": 0.64,
    "Qwen/Qwen2-57B-A14B-Instruct": 0.86,
    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.81,
    "neuralmagic/Mistral-7B-Instruct-v0.3-FP8": 0.54,
    "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8": 0.94,
    "neuralmagic/Qwen2-72B-Instruct-FP8": 0.94,
    "neuralmagic/Qwen2-57B-A14B-Instruct-FP8": 0.82,
}

# Models currently failing on AMD MI300x.
failing_models = {
    "google/gemma-2-27b-it",
    "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8",
    "neuralmagic/gemma-2-2b-it-FP8",
    "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8",
}


def remove_failing_models(model_str):
    models = model_str.split(",")
    filtered = [m for m in models if m not in failing_models]
    return ",".join(filtered)


DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1 = remove_failing_models(
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1
)
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2 = remove_failing_models(
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2
)
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1 = remove_failing_models(
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1
)
DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2 = remove_failing_models(
    DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2
)


def parse_models(model_string):
    return [model.strip() for model in model_string.split(",") if model.strip()]


def popen_launch_server_wrapper(base_url, model, is_tp2):
    other_args = ["--log-level-http", "warning", "--trust-remote-code"]
    if is_tp2:
        other_args.extend(["--tp", "2"])

    process = popen_launch_server(
        model,
        base_url,
        timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
        other_args=other_args,
    )
    return process


def write_results_to_json(model, metrics, mode="a"):
    result = {
        "timestamp": datetime.now().isoformat(),
        "model": model,
        "metrics": metrics,
        "score": metrics["score"],
    }

    existing_results = []
    if mode == "a" and os.path.exists("results.json"):
        try:
            with open("results.json", "r") as f:
                existing_results = json.load(f)
        except json.JSONDecodeError:
            existing_results = []

    if isinstance(existing_results, list):
        existing_results.append(result)
    else:
        existing_results = [result]

    with open("results.json", "w") as f:
        json.dump(existing_results, f, indent=2)


def check_model_scores(results):
    failed_models = []
    summary = " | model | score | threshold |\n"
    summary += "| ----- | ----- | --------- |\n"

    for model, score in results:
        threshold = MODEL_SCORE_THRESHOLDS.get(model)
        if threshold is None:
            print(f"Warning: No threshold defined for model {model}")
            continue

        if score < threshold:
            failed_models.append(
                f"\nScore Check Failed: {model}\n"
                f"Model {model} score ({score:.4f}) is below threshold ({threshold:.4f})"
            )

        line = f"| {model} | {score} | {threshold} |\n"
        summary += line

    print(summary)

    if is_in_ci():
        write_github_step_summary(f"### TestNightlyGsm8KEval\n{summary}")

    if failed_models:
        raise AssertionError("\n".join(failed_models))


# Do not use `CustomTestCase` since `test_mgsm_en_all_models` does not want retry
class TestNightlyGsm8KEval(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_groups = [
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP1), False, False),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_TP2), False, True),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP1), True, False),
            (parse_models(DEFAULT_MODEL_NAME_FOR_NIGHTLY_EVAL_FP8_TP2), True, True),
        ]
        cls.base_url = DEFAULT_URL_FOR_TEST

    def test_mgsm_en_all_models(self):
        warnings.filterwarnings(
            "ignore", category=ResourceWarning, message="unclosed.*socket"
        )
        is_first = True
        all_results = []

        for model_group, is_fp8, is_tp2 in self.model_groups:
            for model in model_group:
                with self.subTest(model=model):
                    process = popen_launch_server_wrapper(self.base_url, model, is_tp2)

                    args = SimpleNamespace(
                        base_url=self.base_url,
                        model=model,
                        eval_name="mgsm_en",
                        num_examples=None,
                        num_threads=1024,
                    )

                    metrics = run_eval(args)
                    print(
                        f"{'=' * 42}\n{model} - metrics={metrics} score={metrics['score']}\n{'=' * 42}\n"
                    )

                    write_results_to_json(model, metrics, "w" if is_first else "a")
                    is_first = False

                    all_results.append((model, metrics["score"]))
                    kill_process_tree(process.pid)

        try:
            with open("results.json", "r") as f:
                print("\nFinal Results from results.json:")
                print(json.dumps(json.load(f), indent=2))
        except Exception as e:
            print(f"Error reading results.json: {e}")

        # Check all scores after collecting all results
        check_model_scores(all_results)


if __name__ == "__main__":
    unittest.main()
```
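Since the shared nightly model lists are reused verbatim, the import-time filtering above is what keeps the AMD run green. A self-contained check of the two string helpers (redefined here so the snippet runs on its own, using one sample model from the file's failing_models set):

```python
# Standalone check of the filtering helpers defined in the test file above.
failing_models = {"google/gemma-2-27b-it"}


def remove_failing_models(model_str):
    return ",".join(m for m in model_str.split(",") if m not in failing_models)


def parse_models(model_string):
    return [m.strip() for m in model_string.split(",") if m.strip()]


models = "meta-llama/Llama-3.1-8B-Instruct,google/gemma-2-27b-it"
assert remove_failing_models(models) == "meta-llama/Llama-3.1-8B-Instruct"
assert parse_models(remove_failing_models(models)) == [
    "meta-llama/Llama-3.1-8B-Instruct"
]
```

One caveat visible in the original: remove_failing_models splits on bare commas without stripping whitespace, so a model list written with spaces after the commas would slip past the filter (parse_models, by contrast, strips each entry).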
