
Commit acba33a

[Bugfix] Fix the issue where llm.generate cannot be called repeatedly after setting GuidedDecodingParams (#16767)
Signed-off-by: chaunceyjiang <[email protected]>
Signed-off-by: Russell Bryant <[email protected]>
Co-authored-by: Russell Bryant <[email protected]>
1 parent a114bf2 commit acba33a
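
For context, a minimal repro sketch of the bug this commit fixes (not part of the commit; the model name, prompts, and schema are illustrative): with the engine-level structured output backend set to "auto", a fallback during the first llm.generate call could rewrite params.guided_decoding.backend in place, so a second call reusing the same SamplingParams failed request validation.

# Hypothetical repro sketch, assuming an engine started with the "auto"
# structured-output backend.
from vllm import LLM, SamplingParams
from vllm.sampling_params import GuidedDecodingParams

llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct", guided_decoding_backend="auto")
params = SamplingParams(
    max_tokens=100,
    guided_decoding=GuidedDecodingParams(json={"type": "object"}))

llm.generate("Give an example JSON object.", sampling_params=params)
# Before this fix, a fallback in the first call could leave
# params.guided_decoding.backend set to a concrete backend (e.g. "guidance"),
# and this second call then raised a ValueError about request-level
# backend selection no longer being supported.
llm.generate("Give another example JSON object.", sampling_params=params)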


3 files changed: +32 -4 lines changed


tests/v1/entrypoints/llm/test_struct_output_generate.py

Lines changed: 11 additions & 3 deletions
@@ -386,13 +386,21 @@ def test_structured_output_auto_mode(
         max_tokens=1000,
         guided_decoding=GuidedDecodingParams(json=unsupported_json_schema))
 
+    prompts = ("Give an example JSON object for a grade "
+               "that fits this schema: "
+               f"{unsupported_json_schema}")
     # This would fail with the default of "xgrammar", but in "auto"
     # we will handle fallback automatically.
-    outputs = llm.generate(prompts=("Give an example JSON object for a grade "
-                                    "that fits this schema: "
-                                    f"{unsupported_json_schema}"),
+    outputs = llm.generate(prompts=prompts,
                            sampling_params=sampling_params,
                            use_tqdm=True)
+    # Make sure `auto` backend handling doesn't mess up sampling_params
+    # and that we can reuse it without error.
+    outputs.extend(
+        llm.generate(prompts=prompts,
+                     sampling_params=sampling_params,
+                     use_tqdm=True))
+
     assert outputs is not None
     for output in outputs:
         assert output is not None

vllm/sampling_params.py

Lines changed: 11 additions & 0 deletions
@@ -79,6 +79,17 @@ def backend_options(self) -> list[str]:
             return []
         return self.backend.split(":")[1].split(",")
 
+    def add_option(self, opt_name: str) -> None:
+        """Adds an option to the backend options."""
+        if not self.backend:
+            self.backend = f":{opt_name}"
+        elif ":" not in self.backend:
+            self.backend += f":{opt_name}"
+        else:
+            options = set(self.backend_options())
+            options.add(opt_name)
+            self.backend = f"{self.backend_name}:{','.join(sorted(options))}"
+
     def no_fallback(self) -> bool:
         """Returns True if the "no-fallback" option is supplied for the guided
         decoding backend"""
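
A quick sketch of how the new add_option composes the backend string, inferred from the "name:opt1,opt2" encoding that backend_options parses above; the concrete values are illustrative:

from vllm.sampling_params import GuidedDecodingParams

gd = GuidedDecodingParams(json={"type": "object"})

gd.backend = None
gd.add_option("_auto")              # backend -> ":_auto" (options, no name)

gd.backend = "xgrammar"
gd.add_option("_auto")              # backend -> "xgrammar:_auto"

gd.backend = "xgrammar:no-fallback"
gd.add_option("_auto")              # backend -> "xgrammar:_auto,no-fallback"
                                    # (options are de-duplicated and sorted)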

vllm/v1/engine/processor.py

Lines changed: 10 additions & 1 deletion
@@ -155,7 +155,14 @@ def _validate_structured_output(self, params: SamplingParams) -> None:
             raise ValueError(f"Only {supported_backends} structured output is "
                              "supported in V1.")
         if params.guided_decoding.backend:
-            if params.guided_decoding.backend != engine_level_backend:
+            # Request-level backend selection is not supported in V1.
+            # The values may differ if `params` is reused and was set
+            # to a specific backend based on `auto` behavior in a previous
+            # request. We remember that it was set as a result of `auto`
+            # using the `_auto` option set on the backend in the params.
+            if (params.guided_decoding.backend != engine_level_backend
+                    and not (engine_level_backend == "auto" and "_auto"
+                             in params.guided_decoding.backend_options())):
                 raise ValueError(
                     "Request-level structured output backend selection is no "
                     "longer supported. The request specified "
@@ -190,6 +197,8 @@ def _validate_structured_output(self, params: SamplingParams) -> None:
                 # The request includes some jsonschema feature(s) that
                 # are not supported in xgrammar. Fall back to guidance.
                 params.guided_decoding.backend = "guidance"
+                # Remember that this backend was set automatically
+                params.guided_decoding.add_option("_auto")
 
     def process_inputs(
         self,
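
The relaxed check can be restated in isolation. This is a simplified, standalone sketch of the condition above, not the actual vLLM code:

def backend_conflicts(request_backend: str, engine_level_backend: str,
                      options: list[str]) -> bool:
    """True if the request's backend conflicts with the engine's.

    A mismatch is tolerated when the engine runs in "auto" mode and the
    request's backend carries the "_auto" marker, meaning a previous
    "auto" fallback (not the user) selected it.
    """
    if request_backend == engine_level_backend:
        return False
    return not (engine_level_backend == "auto" and "_auto" in options)

# Reused params whose backend was set by an earlier "auto" fallback: accepted.
assert not backend_conflicts("guidance:_auto", "auto", ["_auto"])
# A backend explicitly chosen per-request: still rejected.
assert backend_conflicts("guidance", "auto", [])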
