Skip to content

vllm.entrypoints.serve.instrumentator.server_info

PydanticVllmConfig module-attribute

PydanticVllmConfig = TypeAdapter(VllmConfig)

logger module-attribute

logger = init_logger(__name__)

router module-attribute

router = APIRouter()

_get_vllm_env_vars

_get_vllm_env_vars()
Source code in vllm/entrypoints/serve/instrumentator/server_info.py
def _get_vllm_env_vars():
    """Collect the currently-set ``VLLM_*`` environment values.

    Returns a dict mapping each ``VLLM_``-prefixed attribute of the ``envs``
    module to its normalized value. Attributes whose name contains ``KEY``
    are excluded (presumably to avoid exposing secrets — confirm with the
    ``envs`` module) and unset (``None``) values are skipped.
    """
    # Imported lazily inside the function, matching the original source.
    from vllm.config.utils import normalize_value

    candidate_names = [
        name
        for name in dir(envs)
        if name.startswith("VLLM_") and "KEY" not in name
    ]

    collected: dict = {}
    for name in candidate_names:
        raw = getattr(envs, name, None)
        if raw is None:
            continue
        collected[name] = normalize_value(raw)
    return collected

attach_router

attach_router(app: FastAPI)
Source code in vllm/entrypoints/serve/instrumentator/server_info.py
def attach_router(app: FastAPI):
    """Register the server-info router on *app*.

    The router is only attached when ``VLLM_SERVER_DEV_MODE`` is enabled;
    otherwise this is a no-op.
    """
    if envs.VLLM_SERVER_DEV_MODE:
        app.include_router(router)

show_server_info async

show_server_info(
    raw_request: Request,
    config_format: Annotated[
        Literal["text", "json"], Query()
    ] = "text",
)
Source code in vllm/entrypoints/serve/instrumentator/server_info.py
@router.get("/server_info")
async def show_server_info(
    raw_request: Request,
    config_format: Annotated[Literal["text", "json"], Query()] = "text",
):
    """Return the server's vLLM config and ``VLLM_*`` env vars as JSON.

    ``config_format`` selects how the config is rendered: ``"text"`` yields
    its ``str()`` form, ``"json"`` a structured Pydantic dump.
    """
    vllm_config: VllmConfig = raw_request.app.state.vllm_config

    if config_format == "text":
        config_payload = str(vllm_config)
    else:
        # fallback=str is needed to handle e.g. torch.dtype
        config_payload = PydanticVllmConfig.dump_python(
            vllm_config, mode="json", fallback=str
        )

    return JSONResponse(
        content={
            "vllm_config": config_payload,
            "vllm_env": _get_vllm_env_vars(),
        }
    )