Commit e7c63b7

[Misc] Add vLLM version getter to utils (vllm-project#5098)
DarkLight1337 authored and joerunde committed Jun 13, 2024
1 parent d2d4958 commit e7c63b7
Showing 7 changed files with 13 additions and 11 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -314,7 +314,7 @@ def find_version(filepath: str) -> str:


 def get_vllm_version() -> str:
-    version = find_version(get_path("vllm", "__init__.py"))
+    version = find_version(get_path("vllm", "version.py"))

     if _is_cuda():
         cuda_version = str(get_nvcc_cuda_version())
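For context, find_version works by scanning a file's text for a literal __version__ = "..." assignment rather than importing the package, which is what lets setup.py keep reading the version after it moves out of vllm/__init__.py. A minimal sketch of such helpers (hypothetical reconstruction; the real setup.py may differ in detail):

    import os
    import re


    def get_path(*filepath) -> str:
        # Resolve a path relative to the directory containing setup.py.
        root = os.path.dirname(os.path.abspath(__file__))
        return os.path.join(root, *filepath)


    def find_version(filepath: str) -> str:
        # Pull the version out of a `__version__ = "..."` line textually, so
        # the build never has to import the (possibly not-yet-built) package.
        with open(filepath) as f:
            contents = f.read()
        match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
        if match:
            return match.group(1)
        raise RuntimeError("Unable to find version string.")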
3 changes: 2 additions & 1 deletion vllm/__init__.py
@@ -12,9 +12,10 @@
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import SamplingParams

-__version__ = "0.5.0"
+from .version import __version__

 __all__ = [
+    "__version__",
     "LLM",
     "ModelRegistry",
     "PromptStrictInputs",
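With this change, vllm/version.py becomes the single source of truth: setup.py reads it textually at build time, and the re-export above keeps the runtime attribute intact. Both spellings below should resolve to the same string (assuming a vLLM build containing this commit is installed):

    import vllm
    from vllm.version import __version__ as VLLM_VERSION

    # The re-export in vllm/__init__.py preserves the old public attribute.
    assert vllm.__version__ == VLLM_VERSION
    print(VLLM_VERSION)  # e.g. "0.5.0"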
4 changes: 2 additions & 2 deletions vllm/engine/llm_engine.py
@@ -6,7 +6,6 @@

 from transformers import GenerationConfig, PreTrainedTokenizer

-import vllm
 from vllm.config import (CacheConfig, DecodingConfig, DeviceConfig, LoadConfig,
                          LoRAConfig, ModelConfig, ParallelConfig,
                          SchedulerConfig, SpeculativeConfig,
@@ -38,6 +37,7 @@
 from vllm.usage.usage_lib import (UsageContext, is_usage_stats_enabled,
                                   usage_message)
 from vllm.utils import Counter
+from vllm.version import __version__ as VLLM_VERSION

 logger = init_logger(__name__)
 _LOCAL_LOGGING_INTERVAL_SEC = 5
@@ -169,7 +169,7 @@ def __init__(
             "enforce_eager=%s, kv_cache_dtype=%s, "
             "quantization_param_path=%s, device_config=%s, "
             "decoding_config=%r, seed=%d, served_model_name=%s)",
-            vllm.__version__,
+            VLLM_VERSION,
             model_config.model,
             speculative_config,
             model_config.tokenizer,
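A side note on the logging idiom preserved here: the version is passed as a %-style argument rather than interpolated into the message, so formatting is deferred until a handler actually emits the record. A self-contained illustration (logger name assumed):

    import logging

    from vllm.version import __version__ as VLLM_VERSION

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("vllm.engine.llm_engine")

    # The string is only formatted if an INFO record is actually emitted;
    # a disabled level skips the formatting work entirely.
    logger.info("Initializing an LLM engine (v%s)", VLLM_VERSION)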
6 changes: 3 additions & 3 deletions vllm/entrypoints/openai/api_server.py
@@ -15,7 +15,6 @@
 from prometheus_client import make_asgi_app
 from starlette.routing import Mount

-import vllm
 import vllm.envs as envs
 from vllm.engine.arg_utils import AsyncEngineArgs
 from vllm.engine.async_llm_engine import AsyncLLMEngine
@@ -31,6 +30,7 @@
 from vllm.logger import init_logger
 from vllm.tgis_utils.args import add_tgis_args, postprocess_tgis_args
 from vllm.usage.usage_lib import UsageContext
+from vllm.version import __version__ as VLLM_VERSION

 TIMEOUT_KEEP_ALIVE = 5  # seconds

@@ -106,7 +106,7 @@ async def show_available_models():

 @app.get("/version")
 async def show_version():
-    ver = {"version": vllm.__version__}
+    ver = {"version": VLLM_VERSION}
     return JSONResponse(content=ver)


@@ -187,7 +187,7 @@ async def authentication(request: Request, call_next):
            raise ValueError(f"Invalid middleware {middleware}. "
                             f"Must be a function or a class.")

-    logger.info("vLLM API server version %s", vllm.__version__)
+    logger.info("vLLM API server version %s", VLLM_VERSION)
     logger.info("args: %s", args)

     if args.served_model_name is not None:
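For reference, the /version route touched above boils down to the following self-contained FastAPI sketch (app construction simplified; the real server also wires up engine args, middleware, and the OpenAI routes):

    from fastapi import FastAPI
    from fastapi.responses import JSONResponse

    from vllm.version import __version__ as VLLM_VERSION

    app = FastAPI()


    @app.get("/version")
    async def show_version():
        # Report the server's vLLM version, e.g. {"version": "0.5.0"}.
        return JSONResponse(content={"version": VLLM_VERSION})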
4 changes: 2 additions & 2 deletions vllm/entrypoints/openai/run_batch.py
@@ -5,7 +5,6 @@

 import aiohttp

-import vllm
 from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str
 from vllm.engine.async_llm_engine import AsyncLLMEngine
 from vllm.entrypoints.openai.protocol import (BatchRequestInput,
@@ -15,6 +14,7 @@
 from vllm.logger import init_logger
 from vllm.usage.usage_lib import UsageContext
 from vllm.utils import random_uuid
+from vllm.version import __version__ as VLLM_VERSION

 logger = init_logger(__name__)

@@ -135,7 +135,7 @@ async def main(args):
 if __name__ == "__main__":
     args = parse_args()

-    logger.info("vLLM API server version %s", vllm.__version__)
+    logger.info("vLLM API server version %s", VLLM_VERSION)
     logger.info("args: %s", args)

     asyncio.run(main(args))
4 changes: 2 additions & 2 deletions vllm/usage/usage_lib.py
@@ -16,6 +16,7 @@
 import torch

 import vllm.envs as envs
+from vllm.version import __version__ as VLLM_VERSION

 _config_home = envs.VLLM_CONFIG_ROOT
 _USAGE_STATS_JSON_PATH = os.path.join(_config_home, "vllm/usage_stats.json")
@@ -163,9 +164,8 @@ def _report_usage_once(self, model_architecture: str,
        ])

        # vLLM information
-        import vllm  # delayed import to prevent circular import
        self.context = usage_context.value
-        self.vllm_version = vllm.__version__
+        self.vllm_version = VLLM_VERSION
        self.model_architecture = model_architecture

        # Metadata
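This hunk shows the motivation for the whole commit: usage_lib is imported while the vllm package is still initializing, so a top-level import vllm here would re-enter a half-initialized package — hence the old delayed import inside the method. Because vllm/version.py imports nothing itself, it can be imported eagerly instead. A sketch of the shape of the fix (module layout abridged):

    # vllm/__init__.py        -> (indirectly) imports vllm.usage.usage_lib
    # vllm/usage/usage_lib.py -> previously ran `import vllm` inside a method,
    #                            since importing it at module scope would
    #                            re-enter vllm/__init__.py mid-import.
    # vllm/version.py         -> a leaf module with no imports of its own,
    #                            so this top-level import is always safe:
    from vllm.version import __version__ as VLLM_VERSION

    print(VLLM_VERSION)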
1 change: 1 addition & 0 deletions vllm/version.py
@@ -0,0 +1 @@
+__version__ = "0.5.0"
