
Commit c2e6ce6

jeejeelee authored and lk-chen committed
[Misc] Remove the chunked prefill warning for LoRA (vllm-project#16925)
Signed-off-by: Jee Jee Li <[email protected]>
1 parent c94d4c0 commit c2e6ce6

File tree

1 file changed (0 additions, 9 deletions)

vllm/config.py

Lines changed: 0 additions & 9 deletions
@@ -2686,13 +2686,6 @@ def verify_with_model_config(self, model_config: ModelConfig):
         elif isinstance(self.lora_dtype, str):
             self.lora_dtype = getattr(torch, self.lora_dtype)
 
-    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
-        # Reminder: Please update docs/source/features/compatibility_matrix.md
-        # If the feature combo become valid
-        if scheduler_config.chunked_prefill_enabled:
-            logger.warning("LoRA with chunked prefill is still experimental "
-                           "and may be unstable.")
-
     def verify_lora_support(self):
         if self.long_lora_scaling_factors is not None and envs.VLLM_USE_V1:
             raise ValueError(
@@ -3820,8 +3813,6 @@ def __post_init__(self):
         if self.lora_config:
             self.lora_config.verify_with_cache_config(self.cache_config)
             self.lora_config.verify_with_model_config(self.model_config)
-            self.lora_config.verify_with_scheduler_config(
-                self.scheduler_config)
             self.lora_config.verify_lora_support()
         if self.prompt_adapter_config:
             self.prompt_adapter_config.verify_with_model_config(

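With the warning path removed, enabling LoRA together with chunked prefill no longer emits a startup log message. Below is a minimal sketch of combining the two through vLLM's offline LLM API; the base model name, adapter name, adapter path, and sampling values are illustrative assumptions and are not part of this commit.

```python
# Minimal sketch: LoRA combined with chunked prefill via vLLM's offline API.
# Model name, adapter name/path, and sampling values are placeholder assumptions.
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

llm = LLM(
    model="meta-llama/Llama-2-7b-hf",   # assumed base model
    enable_lora=True,                    # triggers LoRAConfig verification in vllm/config.py
    enable_chunked_prefill=True,         # combination that previously logged the removed warning
)

outputs = llm.generate(
    ["Write a haiku about GPUs."],
    SamplingParams(temperature=0.8, max_tokens=64),
    lora_request=LoRARequest("my-adapter", 1, "/path/to/lora"),  # assumed adapter path
)
print(outputs[0].outputs[0].text)
```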