diff --git a/vllm/config.py b/vllm/config.py
index 9a98a7fbc90b..c9b32e5dc1f8 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -951,9 +951,9 @@ def maybe_create_spec_config(
                     # Verify provided value doesn't exceed the maximum
                     # supported by the draft model.
                     raise ValueError(
-                        "Expected both speculative_model and "
-                        "num_speculative_tokens to be provided, but found "
-                        f"{speculative_model=} and {num_speculative_tokens=}.")
+                        "This speculative model supports a maximum of "
+                        f"num_speculative_tokens={n_predict}, but "
+                        f"{num_speculative_tokens=} was provided.")
 
             draft_model_config.max_model_len = (
                 SpeculativeConfig._maybe_override_draft_max_model_len(
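
For context, a minimal standalone sketch of the check whose message this hunk rewrites, assuming (as the surrounding comments suggest) that `n_predict` is the draft model's maximum number of speculative tokens; the helper name and its use outside `maybe_create_spec_config` are illustrative only, not vLLM's actual API:

    def _check_num_speculative_tokens(num_speculative_tokens, n_predict):
        # n_predict: assumed ceiling from the draft model config; None means
        # the draft model imposes no limit on speculative tokens.
        if (n_predict is not None and num_speculative_tokens is not None
                and num_speculative_tokens > n_predict):
            raise ValueError(
                "This speculative model supports a maximum of "
                f"num_speculative_tokens={n_predict}, but "
                f"{num_speculative_tokens=} was provided.")

    # Example: a draft model limited to 4 tokens rejects a request for 8 with
    # the new, specific message rather than the old one about missing arguments.
    try:
        _check_num_speculative_tokens(num_speculative_tokens=8, n_predict=4)
    except ValueError as exc:
        print(exc)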