vllm.entrypoints.openai.engine.protocol

AnyResponseFormat module-attribute

AnyStructuralTagResponseFormat module-attribute

AudioResponseFormat module-attribute

AudioResponseFormat: TypeAlias = Literal[
    "json", "text", "srt", "verbose_json", "vtt"
]

LogitsProcessors module-attribute

LogitsProcessors = list[str | LogitsProcessorConstructor]

ResponseInputOutputItem module-attribute

ResponseInputOutputItem: TypeAlias = (
    ResponseInputItemParam | ResponseOutputItem
)

ResponseInputOutputMessage module-attribute

StreamingResponsesResponse module-attribute

StreamingResponsesResponse: TypeAlias = (
    ResponseCreatedEvent
    | ResponseInProgressEvent
    | ResponseCompletedEvent
    | ResponseOutputItemAddedEvent
    | ResponseOutputItemDoneEvent
    | ResponseContentPartAddedEvent
    | ResponseContentPartDoneEvent
    | ResponseReasoningTextDeltaEvent
    | ResponseReasoningTextDoneEvent
    | ResponseReasoningPartAddedEvent
    | ResponseReasoningPartDoneEvent
    | ResponseCodeInterpreterCallInProgressEvent
    | ResponseCodeInterpreterCallCodeDeltaEvent
    | ResponseWebSearchCallInProgressEvent
    | ResponseWebSearchCallSearchingEvent
    | ResponseWebSearchCallCompletedEvent
    | ResponseCodeInterpreterCallCodeDoneEvent
    | ResponseCodeInterpreterCallInterpretingEvent
    | ResponseCodeInterpreterCallCompletedEvent
    | ResponseMcpCallArgumentsDeltaEvent
    | ResponseMcpCallArgumentsDoneEvent
    | ResponseMcpCallInProgressEvent
    | ResponseMcpCallCompletedEvent
)

TranscriptionResponseVariant module-attribute

TranscriptionResponseVariant: TypeAlias = (
    TranscriptionResponse | TranscriptionResponseVerbose
)

TranslationResponseVariant module-attribute

TranslationResponseVariant: TypeAlias = (
    TranslationResponse | TranslationResponseVerbose
)

_LONG_INFO module-attribute

_LONG_INFO = iinfo(long)

logger module-attribute

logger = init_logger(__name__)

CompletionLogProbs

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionLogProbs(OpenAIBaseModel):
    text_offset: list[int] = Field(default_factory=list)
    token_logprobs: list[float | None] = Field(default_factory=list)
    tokens: list[str] = Field(default_factory=list)
    top_logprobs: list[dict[str, float] | None] = Field(default_factory=list)

text_offset class-attribute instance-attribute

text_offset: list[int] = Field(default_factory=list)

token_logprobs class-attribute instance-attribute

token_logprobs: list[float | None] = Field(
    default_factory=list
)

tokens class-attribute instance-attribute

tokens: list[str] = Field(default_factory=list)

top_logprobs class-attribute instance-attribute

top_logprobs: list[dict[str, float] | None] = Field(
    default_factory=list
)
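
The four lists above are positionally aligned: entry i in each list describes the i-th generated token. A minimal sketch of a populated instance (values are illustrative, not produced by a real model):

# Illustrative only: the lists are parallel, indexed by generated-token position.
logprobs_payload = {
    "text_offset": [0, 5],             # character offset of each token in the text
    "tokens": ["Hello", " world"],     # decoded token strings
    "token_logprobs": [-0.11, -0.52],  # log-probability of each chosen token
    "top_logprobs": [                  # per-position alternatives (token -> logprob)
        {"Hello": -0.11, "Hi": -2.3},
        {" world": -0.52, " there": -1.7},
    ],
}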

CompletionRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/completions/create
    model: str | None = None
    prompt: list[int] | list[list[int]] | str | list[str] | None = None
    echo: bool | None = False
    frequency_penalty: float | None = 0.0
    logit_bias: dict[str, float] | None = None
    logprobs: int | None = None
    max_tokens: int | None = 16
    n: int = 1
    presence_penalty: float | None = 0.0
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    stop: str | list[str] | None = []
    stream: bool | None = False
    stream_options: StreamOptions | None = None
    suffix: str | None = None
    temperature: float | None = None
    top_p: float | None = None
    user: str | None = None

    # --8<-- [start:completion-sampling-params]
    use_beam_search: bool = False
    top_k: int | None = None
    min_p: float | None = None
    repetition_penalty: float | None = None
    length_penalty: float = 1.0
    stop_token_ids: list[int] | None = []
    include_stop_str_in_output: bool = False
    ignore_eos: bool = False
    min_tokens: int = 0
    skip_special_tokens: bool = True
    spaces_between_special_tokens: bool = True
    truncate_prompt_tokens: Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None = (
        None
    )
    allowed_token_ids: list[int] | None = None
    prompt_logprobs: int | None = None
    # --8<-- [end:completion-sampling-params]

    # --8<-- [start:completion-extra-params]
    prompt_embeds: bytes | list[bytes] | None = None
    add_special_tokens: bool = Field(
        default=True,
        description=(
            "If true (the default), special tokens (e.g. BOS) will be added to "
            "the prompt."
        ),
    )
    response_format: AnyResponseFormat | None = Field(
        default=None,
        description=(
            "Similar to chat completion, this parameter specifies the format "
            "of output. Only {'type': 'json_object'}, {'type': 'json_schema'}"
            ", {'type': 'structural_tag'}, or {'type': 'text' } is supported."
        ),
    )
    structured_outputs: StructuredOutputsParams | None = Field(
        default=None,
        description="Additional kwargs for structured outputs",
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    logits_processors: LogitsProcessors | None = Field(
        default=None,
        description=(
            "A list of either qualified names of logits processors, or "
            "constructor objects, to apply when sampling. A constructor is "
            "a JSON object with a required 'qualname' field specifying the "
            "qualified name of the processor class/factory, and optional "
            "'args' and 'kwargs' fields containing positional and keyword "
            "arguments. For example: {'qualname': "
            "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': "
            "{'param': 'value'}}."
        ),
    )

    return_tokens_as_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified with 'logprobs', tokens are represented "
            " as strings of the form 'token_id:{token_id}' so that tokens "
            "that are not JSON-encodable can be identified."
        ),
    )
    return_token_ids: bool | None = Field(
        default=None,
        description=(
            "If specified, the result will include token IDs alongside the "
            "generated text. In streaming mode, prompt_token_ids is included "
            "only in the first chunk, and token_ids contains the delta tokens "
            "for each chunk. This is useful for debugging or when you "
            "need to map generated text back to input tokens."
        ),
    )

    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )

    kv_transfer_params: dict[str, Any] | None = Field(
        default=None,
        description="KVTransfer parameters used for disaggregated serving.",
    )

    vllm_xargs: dict[str, str | int | float] | None = Field(
        default=None,
        description=(
            "Additional request parameters with string or "
            "numeric values, used by custom extensions."
        ),
    )

    # --8<-- [end:completion-extra-params]

    # Default sampling parameters for completion requests
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "repetition_penalty": 1.0,
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
        "min_p": 0.0,
    }

    def to_beam_search_params(
        self,
        max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> BeamSearchParams:
        if default_sampling_params is None:
            default_sampling_params = {}
        n = self.n if self.n is not None else 1

        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get("temperature", 1.0)

        return BeamSearchParams(
            beam_width=n,
            max_tokens=max_tokens,
            ignore_eos=self.ignore_eos,
            temperature=temperature,
            length_penalty=self.length_penalty,
            include_stop_str_in_output=self.include_stop_str_in_output,
        )

    def to_sampling_params(
        self,
        max_tokens: int,
        logits_processor_pattern: str | None,
        default_sampling_params: dict | None = None,
    ) -> SamplingParams:
        if default_sampling_params is None:
            default_sampling_params = {}

        # Default parameters
        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get(
                "repetition_penalty",
                self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
            )
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (min_p := self.min_p) is None:
            min_p = default_sampling_params.get(
                "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
            )

        prompt_logprobs = self.prompt_logprobs
        if prompt_logprobs is None and self.echo:
            prompt_logprobs = self.logprobs

        echo_without_generation = self.echo and self.max_tokens == 0

        response_format = self.response_format
        if response_format is not None:
            # If structured outputs wasn't already enabled,
            # we must enable it for these features to work
            if self.structured_outputs is None:
                self.structured_outputs = StructuredOutputsParams()

            # Set structured output params for response format
            if response_format.type == "json_object":
                self.structured_outputs.json_object = True
            elif response_format.type == "json_schema":
                json_schema = response_format.json_schema
                assert json_schema is not None
                self.structured_outputs.json = json_schema.json_schema
            elif response_format.type == "structural_tag":
                structural_tag = response_format
                assert structural_tag is not None and isinstance(
                    structural_tag,
                    (
                        LegacyStructuralTagResponseFormat,
                        StructuralTagResponseFormat,
                    ),
                )
                s_tag_obj = structural_tag.model_dump(by_alias=True)
                self.structured_outputs.structural_tag = json.dumps(s_tag_obj)

        extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
        if self.kv_transfer_params:
            # Pass in kv_transfer_params via extra_args
            extra_args["kv_transfer_params"] = self.kv_transfer_params
        return SamplingParams.from_optional(
            n=self.n,
            presence_penalty=self.presence_penalty,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=repetition_penalty,
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            min_p=min_p,
            seed=self.seed,
            stop=self.stop,
            stop_token_ids=self.stop_token_ids,
            logprobs=self.logprobs,
            ignore_eos=self.ignore_eos,
            max_tokens=max_tokens if not echo_without_generation else 1,
            min_tokens=self.min_tokens,
            prompt_logprobs=prompt_logprobs,
            skip_special_tokens=self.skip_special_tokens,
            spaces_between_special_tokens=self.spaces_between_special_tokens,
            include_stop_str_in_output=self.include_stop_str_in_output,
            logits_processors=get_logits_processors(
                self.logits_processors, logits_processor_pattern
            ),
            truncate_prompt_tokens=self.truncate_prompt_tokens,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            structured_outputs=self.structured_outputs,
            logit_bias=self.logit_bias,
            allowed_token_ids=self.allowed_token_ids,
            extra_args=extra_args or None,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def check_structured_outputs_count(cls, data):
        if data.get("structured_outputs", None) is None:
            return data

        structured_outputs_kwargs = data["structured_outputs"]
        count = sum(
            structured_outputs_kwargs.get(k) is not None
            for k in ("json", "regex", "choice")
        )
        if count > 1:
            raise VLLMValidationError(
                "You can only use one kind of constraints for structured "
                "outputs ('json', 'regex' or 'choice').",
                parameter="structured_outputs",
            )
        return data

    @model_validator(mode="before")
    @classmethod
    def check_logprobs(cls, data):
        if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
            if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
                raise VLLMValidationError(
                    "`prompt_logprobs` are not available when `stream=True`.",
                    parameter="prompt_logprobs",
                )

            if prompt_logprobs < 0 and prompt_logprobs != -1:
                raise VLLMValidationError(
                    "`prompt_logprobs` must be a positive value or -1.",
                    parameter="prompt_logprobs",
                    value=prompt_logprobs,
                )
        if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
            raise VLLMValidationError(
                "`logprobs` must be a positive value.",
                parameter="logprobs",
                value=logprobs,
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def validate_stream_options(cls, data):
        if data.get("stream_options") and not data.get("stream"):
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter="stream_options",
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def validate_prompt_and_prompt_embeds(cls, data):
        prompt = data.get("prompt")
        prompt_embeds = data.get("prompt_embeds")

        prompt_is_empty = prompt is None or (isinstance(prompt, str) and prompt == "")
        embeds_is_empty = prompt_embeds is None or (
            isinstance(prompt_embeds, list) and len(prompt_embeds) == 0
        )

        if prompt_is_empty and embeds_is_empty:
            raise ValueError(
                "Either prompt or prompt_embeds must be provided and non-empty."
            )

        return data

    @model_validator(mode="before")
    @classmethod
    def check_cache_salt_support(cls, data):
        if data.get("cache_salt") is not None and (
            not isinstance(data["cache_salt"], str) or not data["cache_salt"]
        ):
            raise ValueError(
                "Parameter 'cache_salt' must be a non-empty string if provided."
            )
        return data
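
A minimal sketch of a /v1/completions request body exercising a few of the vLLM-specific fields defined above. The server URL, model name, and the list form of the structured_outputs "choice" constraint are assumptions, not part of this module:

import requests  # assumed available; any HTTP client works

payload = {
    "model": "meta-llama/Llama-3.1-8B-Instruct",  # assumed served model
    "prompt": "The capital of France is",
    "max_tokens": 16,
    "temperature": 0.0,
    # vLLM extensions described above
    "return_token_ids": True,
    "structured_outputs": {"choice": ["Paris", "Lyon"]},
}

resp = requests.post("http://localhost:8000/v1/completions", json=payload)
print(resp.json()["choices"][0]["text"])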

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {
    "repetition_penalty": 1.0,
    "temperature": 1.0,
    "top_p": 1.0,
    "top_k": 0,
    "min_p": 0.0,
}

add_special_tokens class-attribute instance-attribute

add_special_tokens: bool = Field(
    default=True,
    description="If true (the default), special tokens (e.g. BOS) will be added to the prompt.",
)

allowed_token_ids class-attribute instance-attribute

allowed_token_ids: list[int] | None = None

cache_salt class-attribute instance-attribute

cache_salt: str | None = Field(
    default=None,
    description="If specified, the prefix cache will be salted with the provided string to prevent an attacker to guess prompts in multi-user environments. The salt should be random, protected from access by 3rd parties, and long enough to be unpredictable (e.g., 43 characters base64-encoded, corresponding to 256 bit).",
)

echo class-attribute instance-attribute

echo: bool | None = False

frequency_penalty class-attribute instance-attribute

frequency_penalty: float | None = 0.0

ignore_eos class-attribute instance-attribute

ignore_eos: bool = False

include_stop_str_in_output class-attribute instance-attribute

include_stop_str_in_output: bool = False

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None,
    description="KVTransfer parameters used for disaggregated serving.",
)

length_penalty class-attribute instance-attribute

length_penalty: float = 1.0

logit_bias class-attribute instance-attribute

logit_bias: dict[str, float] | None = None

logits_processors class-attribute instance-attribute

logits_processors: LogitsProcessors | None = Field(
    default=None,
    description="A list of either qualified names of logits processors, or constructor objects, to apply when sampling. A constructor is a JSON object with a required 'qualname' field specifying the qualified name of the processor class/factory, and optional 'args' and 'kwargs' fields containing positional and keyword arguments. For example: {'qualname': 'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': {'param': 'value'}}.",
)

logprobs class-attribute instance-attribute

logprobs: int | None = None

max_tokens class-attribute instance-attribute

max_tokens: int | None = 16

min_p class-attribute instance-attribute

min_p: float | None = None

min_tokens class-attribute instance-attribute

min_tokens: int = 0

model class-attribute instance-attribute

model: str | None = None

n class-attribute instance-attribute

n: int = 1

presence_penalty class-attribute instance-attribute

presence_penalty: float | None = 0.0

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

prompt class-attribute instance-attribute

prompt: (
    list[int] | list[list[int]] | str | list[str] | None
) = None

prompt_embeds class-attribute instance-attribute

prompt_embeds: bytes | list[bytes] | None = None

prompt_logprobs class-attribute instance-attribute

prompt_logprobs: int | None = None

repetition_penalty class-attribute instance-attribute

repetition_penalty: float | None = None

request_id class-attribute instance-attribute

request_id: str = Field(
    default_factory=random_uuid,
    description="The request_id related to this request. If the caller does not set it, a random_uuid will be generated. This id is used through out the inference process and return in response.",
)

response_format class-attribute instance-attribute

response_format: AnyResponseFormat | None = Field(
    default=None,
    description="Similar to chat completion, this parameter specifies the format of output. Only {'type': 'json_object'}, {'type': 'json_schema'}, {'type': 'structural_tag'}, or {'type': 'text' } is supported.",
)

return_token_ids class-attribute instance-attribute

return_token_ids: bool | None = Field(
    default=None,
    description="If specified, the result will include token IDs alongside the generated text. In streaming mode, prompt_token_ids is included only in the first chunk, and token_ids contains the delta tokens for each chunk. This is useful for debugging or when you need to map generated text back to input tokens.",
)

return_tokens_as_token_ids class-attribute instance-attribute

return_tokens_as_token_ids: bool | None = Field(
    default=None,
    description="If specified with 'logprobs', tokens are represented  as strings of the form 'token_id:{token_id}' so that tokens that are not JSON-encodable can be identified.",
)

seed class-attribute instance-attribute

seed: int | None = Field(
    None, ge=_LONG_INFO.min, le=_LONG_INFO.max
)

skip_special_tokens class-attribute instance-attribute

skip_special_tokens: bool = True

spaces_between_special_tokens class-attribute instance-attribute

spaces_between_special_tokens: bool = True

stop class-attribute instance-attribute

stop: str | list[str] | None = []

stop_token_ids class-attribute instance-attribute

stop_token_ids: list[int] | None = []

stream class-attribute instance-attribute

stream: bool | None = False

stream_options class-attribute instance-attribute

stream_options: StreamOptions | None = None

structured_outputs class-attribute instance-attribute

structured_outputs: StructuredOutputsParams | None = Field(
    default=None,
    description="Additional kwargs for structured outputs",
)

suffix class-attribute instance-attribute

suffix: str | None = None

temperature class-attribute instance-attribute

temperature: float | None = None

top_k class-attribute instance-attribute

top_k: int | None = None

top_p class-attribute instance-attribute

top_p: float | None = None

truncate_prompt_tokens class-attribute instance-attribute

truncate_prompt_tokens: (
    Annotated[int, Field(ge=-1, le=_LONG_INFO.max)] | None
) = None

use_beam_search class-attribute instance-attribute

use_beam_search: bool = False

user class-attribute instance-attribute

user: str | None = None

vllm_xargs class-attribute instance-attribute

vllm_xargs: dict[str, str | int | float] | None = Field(
    default=None,
    description="Additional request parameters with string or numeric values, used by custom extensions.",
)

check_cache_salt_support classmethod

check_cache_salt_support(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def check_cache_salt_support(cls, data):
    if data.get("cache_salt") is not None and (
        not isinstance(data["cache_salt"], str) or not data["cache_salt"]
    ):
        raise ValueError(
            "Parameter 'cache_salt' must be a non-empty string if provided."
        )
    return data

check_logprobs classmethod

check_logprobs(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def check_logprobs(cls, data):
    if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
        if data.get("stream") and (prompt_logprobs > 0 or prompt_logprobs == -1):
            raise VLLMValidationError(
                "`prompt_logprobs` are not available when `stream=True`.",
                parameter="prompt_logprobs",
            )

        if prompt_logprobs < 0 and prompt_logprobs != -1:
            raise VLLMValidationError(
                "`prompt_logprobs` must be a positive value or -1.",
                parameter="prompt_logprobs",
                value=prompt_logprobs,
            )
    if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
        raise VLLMValidationError(
            "`logprobs` must be a positive value.",
            parameter="logprobs",
            value=logprobs,
        )

    return data

check_structured_outputs_count classmethod

check_structured_outputs_count(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def check_structured_outputs_count(cls, data):
    if data.get("structured_outputs", None) is None:
        return data

    structured_outputs_kwargs = data["structured_outputs"]
    count = sum(
        structured_outputs_kwargs.get(k) is not None
        for k in ("json", "regex", "choice")
    )
    if count > 1:
        raise VLLMValidationError(
            "You can only use one kind of constraints for structured "
            "outputs ('json', 'regex' or 'choice').",
            parameter="structured_outputs",
        )
    return data

to_beam_search_params

to_beam_search_params(
    max_tokens: int,
    default_sampling_params: dict | None = None,
) -> BeamSearchParams
Source code in vllm/entrypoints/openai/engine/protocol.py
def to_beam_search_params(
    self,
    max_tokens: int,
    default_sampling_params: dict | None = None,
) -> BeamSearchParams:
    if default_sampling_params is None:
        default_sampling_params = {}
    n = self.n if self.n is not None else 1

    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get("temperature", 1.0)

    return BeamSearchParams(
        beam_width=n,
        max_tokens=max_tokens,
        ignore_eos=self.ignore_eos,
        temperature=temperature,
        length_penalty=self.length_penalty,
        include_stop_str_in_output=self.include_stop_str_in_output,
    )

to_sampling_params

to_sampling_params(
    max_tokens: int,
    logits_processor_pattern: str | None,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/engine/protocol.py
def to_sampling_params(
    self,
    max_tokens: int,
    logits_processor_pattern: str | None,
    default_sampling_params: dict | None = None,
) -> SamplingParams:
    if default_sampling_params is None:
        default_sampling_params = {}

    # Default parameters
    if (repetition_penalty := self.repetition_penalty) is None:
        repetition_penalty = default_sampling_params.get(
            "repetition_penalty",
            self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
        )
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )
    if (top_p := self.top_p) is None:
        top_p = default_sampling_params.get(
            "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
        )
    if (top_k := self.top_k) is None:
        top_k = default_sampling_params.get(
            "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
        )
    if (min_p := self.min_p) is None:
        min_p = default_sampling_params.get(
            "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
        )

    prompt_logprobs = self.prompt_logprobs
    if prompt_logprobs is None and self.echo:
        prompt_logprobs = self.logprobs

    echo_without_generation = self.echo and self.max_tokens == 0

    response_format = self.response_format
    if response_format is not None:
        # If structured outputs wasn't already enabled,
        # we must enable it for these features to work
        if self.structured_outputs is None:
            self.structured_outputs = StructuredOutputsParams()

        # Set structured output params for response format
        if response_format.type == "json_object":
            self.structured_outputs.json_object = True
        elif response_format.type == "json_schema":
            json_schema = response_format.json_schema
            assert json_schema is not None
            self.structured_outputs.json = json_schema.json_schema
        elif response_format.type == "structural_tag":
            structural_tag = response_format
            assert structural_tag is not None and isinstance(
                structural_tag,
                (
                    LegacyStructuralTagResponseFormat,
                    StructuralTagResponseFormat,
                ),
            )
            s_tag_obj = structural_tag.model_dump(by_alias=True)
            self.structured_outputs.structural_tag = json.dumps(s_tag_obj)

    extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {}
    if self.kv_transfer_params:
        # Pass in kv_transfer_params via extra_args
        extra_args["kv_transfer_params"] = self.kv_transfer_params
    return SamplingParams.from_optional(
        n=self.n,
        presence_penalty=self.presence_penalty,
        frequency_penalty=self.frequency_penalty,
        repetition_penalty=repetition_penalty,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        seed=self.seed,
        stop=self.stop,
        stop_token_ids=self.stop_token_ids,
        logprobs=self.logprobs,
        ignore_eos=self.ignore_eos,
        max_tokens=max_tokens if not echo_without_generation else 1,
        min_tokens=self.min_tokens,
        prompt_logprobs=prompt_logprobs,
        skip_special_tokens=self.skip_special_tokens,
        spaces_between_special_tokens=self.spaces_between_special_tokens,
        include_stop_str_in_output=self.include_stop_str_in_output,
        logits_processors=get_logits_processors(
            self.logits_processors, logits_processor_pattern
        ),
        truncate_prompt_tokens=self.truncate_prompt_tokens,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        structured_outputs=self.structured_outputs,
        logit_bias=self.logit_bias,
        allowed_token_ids=self.allowed_token_ids,
        extra_args=extra_args or None,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )
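
The repeated `if (x := self.x) is None:` blocks above implement a three-level fallback for each sampling knob: the request value wins, then the server-supplied default_sampling_params, then the hard-coded _DEFAULT_SAMPLING_PARAMS. A small standalone sketch of that order (illustrative, not vLLM code):

def resolve(request_value, server_defaults: dict, key: str, hard_default):
    # Request value takes priority, then the server default, then the constant.
    if request_value is not None:
        return request_value
    return server_defaults.get(key, hard_default)

assert resolve(0.2, {"temperature": 0.7}, "temperature", 1.0) == 0.2   # request wins
assert resolve(None, {"temperature": 0.7}, "temperature", 1.0) == 0.7  # server default
assert resolve(None, {}, "temperature", 1.0) == 1.0                    # hard-coded default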

validate_prompt_and_prompt_embeds classmethod

validate_prompt_and_prompt_embeds(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def validate_prompt_and_prompt_embeds(cls, data):
    prompt = data.get("prompt")
    prompt_embeds = data.get("prompt_embeds")

    prompt_is_empty = prompt is None or (isinstance(prompt, str) and prompt == "")
    embeds_is_empty = prompt_embeds is None or (
        isinstance(prompt_embeds, list) and len(prompt_embeds) == 0
    )

    if prompt_is_empty and embeds_is_empty:
        raise ValueError(
            "Either prompt or prompt_embeds must be provided and non-empty."
        )

    return data

validate_stream_options classmethod

validate_stream_options(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def validate_stream_options(cls, data):
    if data.get("stream_options") and not data.get("stream"):
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter="stream_options",
        )

    return data

CompletionResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: Literal["text_completion"] = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseChoice]
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    system_fingerprint: str | None = None
    usage: UsageInfo

    # vLLM-specific fields that are not in OpenAI spec
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None, description="KVTransfer parameters."
    )

choices instance-attribute

choices: list[CompletionResponseChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"cmpl-{random_uuid()}"
)

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None, description="KVTransfer parameters."
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: Literal['text_completion'] = 'text_completion'

service_tier class-attribute instance-attribute

service_tier: (
    Literal["auto", "default", "flex", "scale", "priority"]
    | None
) = None

system_fingerprint class-attribute instance-attribute

system_fingerprint: str | None = None

usage instance-attribute

usage: UsageInfo
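
A hedged sketch of the JSON this model serializes to. Values are illustrative; UsageInfo is referenced above but not shown in this section, so its usual prompt/completion/total token fields are assumed:

completion_response = {
    "id": "cmpl-...",                    # default_factory: f"cmpl-{random_uuid()}"
    "object": "text_completion",
    "created": 1730000000,               # int(time.time())
    "model": "meta-llama/Llama-3.1-8B-Instruct",  # assumed served model name
    "choices": [
        {
            "index": 0,
            "text": " Paris",
            "logprobs": None,
            "finish_reason": "stop",
            "stop_reason": None,
        }
    ],
    "usage": {"prompt_tokens": 6, "completion_tokens": 2, "total_tokens": 8},
}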

CompletionResponseChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionResponseChoice(OpenAIBaseModel):
    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    token_ids: list[int] | None = None  # For response
    prompt_logprobs: list[dict[int, Logprob] | None] | None = None
    prompt_token_ids: list[int] | None = None  # For prompt

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

index instance-attribute

index: int

logprobs class-attribute instance-attribute

logprobs: CompletionLogProbs | None = None

prompt_logprobs class-attribute instance-attribute

prompt_logprobs: list[dict[int, Logprob] | None] | None = (
    None
)

prompt_token_ids class-attribute instance-attribute

prompt_token_ids: list[int] | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = Field(
    default=None,
    description="The stop string or token id that caused the completion to stop, None if the completion finished for some other reason including encountering the EOS token",
)

text instance-attribute

text: str

token_ids class-attribute instance-attribute

token_ids: list[int] | None = None

CompletionResponseStreamChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionResponseStreamChoice(OpenAIBaseModel):
    index: int
    text: str
    logprobs: CompletionLogProbs | None = None
    finish_reason: str | None = None
    stop_reason: int | str | None = Field(
        default=None,
        description=(
            "The stop string or token id that caused the completion "
            "to stop, None if the completion finished for some other reason "
            "including encountering the EOS token"
        ),
    )
    # not part of the OpenAI spec but for tracing the tokens
    # prompt tokens is put into choice to align with CompletionResponseChoice
    prompt_token_ids: list[int] | None = None
    token_ids: list[int] | None = None

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

index instance-attribute

index: int

logprobs class-attribute instance-attribute

logprobs: CompletionLogProbs | None = None

prompt_token_ids class-attribute instance-attribute

prompt_token_ids: list[int] | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = Field(
    default=None,
    description="The stop string or token id that caused the completion to stop, None if the completion finished for some other reason including encountering the EOS token",
)

text instance-attribute

text: str

token_ids class-attribute instance-attribute

token_ids: list[int] | None = None

CompletionStreamResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class CompletionStreamResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
    object: str = "text_completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[CompletionResponseStreamChoice]
    usage: UsageInfo | None = Field(default=None)

choices instance-attribute

choices: list[CompletionResponseStreamChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"cmpl-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: str = 'text_completion'

usage class-attribute instance-attribute

usage: UsageInfo | None = Field(default=None)

DeltaFunctionCall

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaFunctionCall(BaseModel):
    name: str | None = None
    arguments: str | None = None

arguments class-attribute instance-attribute

arguments: str | None = None

name class-attribute instance-attribute

name: str | None = None

DeltaMessage

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaMessage(OpenAIBaseModel):
    role: str | None = None
    content: str | None = None
    reasoning: str | None = None
    reasoning_content: str | None = None
    """Deprecated: use `reasoning` instead."""
    tool_calls: list[DeltaToolCall] = Field(default_factory=list)

    @model_validator(mode="after")
    def handle_deprecated_reasoning_content(self):
        """Copy reasoning to reasoning_content for backward compatibility."""
        self.reasoning_content = self.reasoning
        return self

content class-attribute instance-attribute

content: str | None = None

reasoning class-attribute instance-attribute

reasoning: str | None = None

reasoning_content class-attribute instance-attribute

reasoning_content: str | None = None

Deprecated: use reasoning instead.

role class-attribute instance-attribute

role: str | None = None

tool_calls class-attribute instance-attribute

tool_calls: list[DeltaToolCall] = Field(
    default_factory=list
)

handle_deprecated_reasoning_content

handle_deprecated_reasoning_content()

Copy reasoning to reasoning_content for backward compatibility.

Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="after")
def handle_deprecated_reasoning_content(self):
    """Copy reasoning to reasoning_content for backward compatibility."""
    self.reasoning_content = self.reasoning
    return self
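
A minimal sketch of the backward-compatibility behaviour above, assuming vLLM is installed and that this import path matches the documented module:

from vllm.entrypoints.openai.engine.protocol import DeltaMessage

delta = DeltaMessage(role="assistant", reasoning="thinking out loud")
# The after-validator mirrors `reasoning` into the deprecated field.
assert delta.reasoning_content == "thinking out loud"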

DeltaToolCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class DeltaToolCall(OpenAIBaseModel):
    id: str | None = None
    type: Literal["function"] | None = None
    index: int
    function: DeltaFunctionCall | None = None

function class-attribute instance-attribute

function: DeltaFunctionCall | None = None

id class-attribute instance-attribute

id: str | None = None

index instance-attribute

index: int

type class-attribute instance-attribute

type: Literal['function'] | None = None

ErrorInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ErrorInfo(OpenAIBaseModel):
    message: str
    type: str
    param: str | None = None
    code: int

code instance-attribute

code: int

message instance-attribute

message: str

param class-attribute instance-attribute

param: str | None = None

type instance-attribute

type: str

ErrorResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ErrorResponse(OpenAIBaseModel):
    error: ErrorInfo

error instance-attribute

error: ErrorInfo
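
Put together, an error reply from the server has the following wire shape (values are illustrative):

error_response = {
    "error": {  # ErrorResponse.error -> ErrorInfo
        "message": "The model `foo` does not exist.",
        "type": "NotFoundError",
        "param": None,
        "code": 404,
    }
}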

ExtractedToolCallInformation

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ExtractedToolCallInformation(BaseModel):
    # indicate if tools were called
    tools_called: bool

    # extracted tool calls
    tool_calls: list[ToolCall]

    # content - per OpenAI spec, content AND tool calls can be returned rarely
    # But some models will do this intentionally
    content: str | None = None

content class-attribute instance-attribute

content: str | None = None

tool_calls instance-attribute

tool_calls: list[ToolCall]

tools_called instance-attribute

tools_called: bool

FunctionCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class FunctionCall(OpenAIBaseModel):
    name: str
    arguments: str

arguments instance-attribute

arguments: str

name instance-attribute

name: str

FunctionDefinition

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class FunctionDefinition(OpenAIBaseModel):
    name: str
    description: str | None = None
    parameters: dict[str, Any] | None = None

description class-attribute instance-attribute

description: str | None = None

name instance-attribute

name: str

parameters class-attribute instance-attribute

parameters: dict[str, Any] | None = None

GenerateRequest

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class GenerateRequest(BaseModel):
    request_id: str = Field(
        default_factory=random_uuid,
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    token_ids: list[int]
    """The token ids to generate text from."""

    # features: MultiModalFeatureSpec
    # TODO (NickLucche): implement once Renderer work is completed
    features: str | None = None
    """The processed MM inputs for the model."""

    sampling_params: SamplingParams
    """The sampling parameters for the model."""

    model: str | None = None

    stream: bool | None = False
    stream_options: StreamOptions | None = None
    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    kv_transfer_params: dict[str, Any] | None = Field(
        default=None,
        description="KVTransfer parameters used for disaggregated serving.",
    )

cache_salt class-attribute instance-attribute

cache_salt: str | None = Field(
    default=None,
    description="If specified, the prefix cache will be salted with the provided string to prevent an attacker to guess prompts in multi-user environments. The salt should be random, protected from access by 3rd parties, and long enough to be unpredictable (e.g., 43 characters base64-encoded, corresponding to 256 bit).",
)

features class-attribute instance-attribute

features: str | None = None

The processed MM inputs for the model.

kv_transfer_params class-attribute instance-attribute

kv_transfer_params: dict[str, Any] | None = Field(
    default=None,
    description="KVTransfer parameters used for disaggregated serving.",
)

model class-attribute instance-attribute

model: str | None = None

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

request_id class-attribute instance-attribute

request_id: str = Field(
    default_factory=random_uuid,
    description="The request_id related to this request. If the caller does not set it, a random_uuid will be generated. This id is used through out the inference process and return in response.",
)

sampling_params instance-attribute

sampling_params: SamplingParams

The sampling parameters for the model.

stream class-attribute instance-attribute

stream: bool | None = False

stream_options class-attribute instance-attribute

stream_options: StreamOptions | None = None

token_ids instance-attribute

token_ids: list[int]

The token ids to generate text from.

InputTokensDetails

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class InputTokensDetails(OpenAIBaseModel):
    cached_tokens: int
    input_tokens_per_turn: list[int] = Field(default_factory=list)
    cached_tokens_per_turn: list[int] = Field(default_factory=list)

cached_tokens instance-attribute

cached_tokens: int

cached_tokens_per_turn class-attribute instance-attribute

cached_tokens_per_turn: list[int] = Field(
    default_factory=list
)

input_tokens_per_turn class-attribute instance-attribute

input_tokens_per_turn: list[int] = Field(
    default_factory=list
)

JsonSchemaResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class JsonSchemaResponseFormat(OpenAIBaseModel):
    name: str
    description: str | None = None
    # schema is the field in openai but that causes conflicts with pydantic so
    # instead use json_schema with an alias
    json_schema: dict[str, Any] | None = Field(default=None, alias="schema")
    strict: bool | None = None

description class-attribute instance-attribute

description: str | None = None

json_schema class-attribute instance-attribute

json_schema: dict[str, Any] | None = Field(
    default=None, alias="schema"
)

name instance-attribute

name: str

strict class-attribute instance-attribute

strict: bool | None = None
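
Because of the alias above, the wire-level key is "schema" while the Python attribute is json_schema. A hedged sketch of a response_format request value using this model (the schema contents are illustrative):

response_format = {
    "type": "json_schema",
    "json_schema": {      # ResponseFormat.json_schema -> JsonSchemaResponseFormat
        "name": "person",
        "description": "A person record",
        "schema": {       # mapped onto `json_schema` via the alias
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        },
        "strict": True,
    },
}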

LegacyStructuralTag

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LegacyStructuralTag(OpenAIBaseModel):
    begin: str
    # schema is the field, but that causes conflicts with pydantic so
    # instead use structural_tag_schema with an alias
    structural_tag_schema: dict[str, Any] | None = Field(default=None, alias="schema")
    end: str

begin instance-attribute

begin: str

end instance-attribute

end: str

structural_tag_schema class-attribute instance-attribute

structural_tag_schema: dict[str, Any] | None = Field(
    default=None, alias="schema"
)

LegacyStructuralTagResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LegacyStructuralTagResponseFormat(OpenAIBaseModel):
    type: Literal["structural_tag"]
    structures: list[LegacyStructuralTag]
    triggers: list[str]

structures instance-attribute

structures: list[LegacyStructuralTag]

triggers instance-attribute

triggers: list[str]

type instance-attribute

type: Literal['structural_tag']

LogitsProcessorConstructor

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class LogitsProcessorConstructor(BaseModel):
    qualname: str
    args: list[Any] | None = None
    kwargs: dict[str, Any] | None = None

    model_config = ConfigDict(extra="forbid")

args class-attribute instance-attribute

args: list[Any] | None = None

kwargs class-attribute instance-attribute

kwargs: dict[str, Any] | None = None

model_config class-attribute instance-attribute

model_config = ConfigDict(extra='forbid')

qualname instance-attribute

qualname: str
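
A sketch of a logits_processors request value mixing the two allowed forms from the LogitsProcessors alias (a plain qualified name and a constructor object). The processor names are hypothetical, not part of vLLM:

logits_processors = [
    "my_pkg.NoRepeatProcessor",            # qualified name only
    {
        "qualname": "my_pkg.ScaleLogits",  # constructor object (extra keys are rejected)
        "args": [2.0],
        "kwargs": {"clamp": True},
    },
]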

ModelCard

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelCard(OpenAIBaseModel):
    id: str
    object: str = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: str = "vllm"
    root: str | None = None
    parent: str | None = None
    max_model_len: int | None = None
    permission: list[ModelPermission] = Field(default_factory=list)

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

id instance-attribute

id: str

max_model_len class-attribute instance-attribute

max_model_len: int | None = None

object class-attribute instance-attribute

object: str = 'model'

owned_by class-attribute instance-attribute

owned_by: str = 'vllm'

parent class-attribute instance-attribute

parent: str | None = None

permission class-attribute instance-attribute

permission: list[ModelPermission] = Field(
    default_factory=list
)

root class-attribute instance-attribute

root: str | None = None

ModelList

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelList(OpenAIBaseModel):
    object: str = "list"
    data: list[ModelCard] = Field(default_factory=list)

data class-attribute instance-attribute

data: list[ModelCard] = Field(default_factory=list)

object class-attribute instance-attribute

object: str = 'list'

ModelPermission

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ModelPermission(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}")
    object: str = "model_permission"
    created: int = Field(default_factory=lambda: int(time.time()))
    allow_create_engine: bool = False
    allow_sampling: bool = True
    allow_logprobs: bool = True
    allow_search_indices: bool = False
    allow_view: bool = True
    allow_fine_tuning: bool = False
    organization: str = "*"
    group: str | None = None
    is_blocking: bool = False

allow_create_engine class-attribute instance-attribute

allow_create_engine: bool = False

allow_fine_tuning class-attribute instance-attribute

allow_fine_tuning: bool = False

allow_logprobs class-attribute instance-attribute

allow_logprobs: bool = True

allow_sampling class-attribute instance-attribute

allow_sampling: bool = True

allow_search_indices class-attribute instance-attribute

allow_search_indices: bool = False

allow_view class-attribute instance-attribute

allow_view: bool = True

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

group class-attribute instance-attribute

group: str | None = None

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"modelperm-{random_uuid()}"
)

is_blocking class-attribute instance-attribute

is_blocking: bool = False

object class-attribute instance-attribute

object: str = 'model_permission'

organization class-attribute instance-attribute

organization: str = '*'

OpenAIBaseModel

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class OpenAIBaseModel(BaseModel):
    # OpenAI API does allow extra fields
    model_config = ConfigDict(extra="allow")

    # Cache class field names
    field_names: ClassVar[set[str] | None] = None

    @model_validator(mode="wrap")
    @classmethod
    def __log_extra_fields__(cls, data, handler):
        result = handler(data)
        if not isinstance(data, dict):
            return result
        field_names = cls.field_names
        if field_names is None:
            # Get all class field names and their potential aliases
            field_names = set()
            for field_name, field in cls.model_fields.items():
                field_names.add(field_name)
                if alias := getattr(field, "alias", None):
                    field_names.add(alias)
            cls.field_names = field_names

        # Compare against both field names and aliases
        if any(k not in field_names for k in data):
            logger.warning(
                "The following fields were present in the request but ignored: %s",
                data.keys() - field_names,
            )
        return result

field_names class-attribute

field_names: set[str] | None = None

model_config class-attribute instance-attribute

model_config = ConfigDict(extra='allow')

__log_extra_fields__ classmethod

__log_extra_fields__(data, handler)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="wrap")
@classmethod
def __log_extra_fields__(cls, data, handler):
    result = handler(data)
    if not isinstance(data, dict):
        return result
    field_names = cls.field_names
    if field_names is None:
        # Get all class field names and their potential aliases
        field_names = set()
        for field_name, field in cls.model_fields.items():
            field_names.add(field_name)
            if alias := getattr(field, "alias", None):
                field_names.add(alias)
        cls.field_names = field_names

    # Compare against both field names and aliases
    if any(k not in field_names for k in data):
        logger.warning(
            "The following fields were present in the request but ignored: %s",
            data.keys() - field_names,
        )
    return result
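
Because model_config allows extra fields, an unknown key is accepted rather than rejected; the wrap-validator above only logs a warning naming the ignored keys. A small sketch, assuming vLLM is installed and the import path matches this module:

from vllm.entrypoints.openai.engine.protocol import ErrorInfo

# "not_a_field" is not declared on ErrorInfo; it is kept (extra="allow") and a
# warning is logged listing it as ignored.
info = ErrorInfo(message="boom", type="internal_error", code=500, not_a_field=1)
print(info.model_dump())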

OutputTokensDetails

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class OutputTokensDetails(OpenAIBaseModel):
    reasoning_tokens: int = 0
    tool_output_tokens: int = 0
    output_tokens_per_turn: list[int] = Field(default_factory=list)
    tool_output_tokens_per_turn: list[int] = Field(default_factory=list)

output_tokens_per_turn class-attribute instance-attribute

output_tokens_per_turn: list[int] = Field(
    default_factory=list
)

reasoning_tokens class-attribute instance-attribute

reasoning_tokens: int = 0

tool_output_tokens class-attribute instance-attribute

tool_output_tokens: int = 0

tool_output_tokens_per_turn class-attribute instance-attribute

tool_output_tokens_per_turn: list[int] = Field(
    default_factory=list
)

PromptTokenUsageInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class PromptTokenUsageInfo(OpenAIBaseModel):
    cached_tokens: int | None = None

cached_tokens class-attribute instance-attribute

cached_tokens: int | None = None

RequestResponseMetadata

Bases: BaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class RequestResponseMetadata(BaseModel):
    request_id: str
    final_usage_info: UsageInfo | None = None

final_usage_info class-attribute instance-attribute

final_usage_info: UsageInfo | None = None

request_id instance-attribute

request_id: str

ResponseCompletedEvent

Bases: OpenAIResponseCompletedEvent

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseCompletedEvent(OpenAIResponseCompletedEvent):
    response: ResponsesResponse  # type: ignore[override]

response instance-attribute

response: ResponsesResponse

ResponseCreatedEvent

Bases: OpenAIResponseCreatedEvent

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseCreatedEvent(OpenAIResponseCreatedEvent):
    response: ResponsesResponse  # type: ignore[override]

response instance-attribute

response: ResponsesResponse

ResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseFormat(OpenAIBaseModel):
    # type must be "json_schema", "json_object", or "text"
    type: Literal["text", "json_object", "json_schema"]
    json_schema: JsonSchemaResponseFormat | None = None

json_schema class-attribute instance-attribute

json_schema: JsonSchemaResponseFormat | None = None

type instance-attribute

type: Literal['text', 'json_object', 'json_schema']

ResponseInProgressEvent

Bases: OpenAIResponseInProgressEvent

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseInProgressEvent(OpenAIResponseInProgressEvent):
    response: ResponsesResponse  # type: ignore[override]

response instance-attribute

response: ResponsesResponse

ResponseRawMessageAndToken

Bases: OpenAIBaseModel

Class to show the raw message. If message / tokens diverge, tokens is the source of truth

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseRawMessageAndToken(OpenAIBaseModel):
    """Class to show the raw message.
    If message / tokens diverge, tokens is the source of truth"""

    message: str
    tokens: list[int]
    type: Literal["raw_message_tokens"] = "raw_message_tokens"

message instance-attribute

message: str

tokens instance-attribute

tokens: list[int]

type class-attribute instance-attribute

type: Literal['raw_message_tokens'] = 'raw_message_tokens'

ResponseReasoningPartAddedEvent

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseReasoningPartAddedEvent(OpenAIBaseModel):
    content_index: int
    """The index of the content part that is done."""

    item_id: str
    """The ID of the output item that the content part was added to."""

    output_index: int
    """The index of the output item that the content part was added to."""

    part: ResponseReasoningTextContent
    """The content part that is done."""

    sequence_number: int
    """The sequence number of this event."""

    type: Literal["response.reasoning_part.added"]
    """The type of the event. Always `response.reasoning_part.added`."""

content_index instance-attribute

content_index: int

The index of the content part that is done.

item_id instance-attribute

item_id: str

The ID of the output item that the content part was added to.

output_index instance-attribute

output_index: int

The index of the output item that the content part was added to.

part instance-attribute

part: Content

The content part that is done.

sequence_number instance-attribute

sequence_number: int

The sequence number of this event.

type instance-attribute

type: Literal['response.reasoning_part.added']

The type of the event. Always response.reasoning_part.added.

ResponseReasoningPartDoneEvent

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseReasoningPartDoneEvent(OpenAIBaseModel):
    content_index: int
    """The index of the content part that is done."""

    item_id: str
    """The ID of the output item that the content part was added to."""

    output_index: int
    """The index of the output item that the content part was added to."""

    part: ResponseReasoningTextContent
    """The content part that is done."""

    sequence_number: int
    """The sequence number of this event."""

    type: Literal["response.reasoning_part.done"]
    """The type of the event. Always `response.reasoning_part.done`."""

content_index instance-attribute

content_index: int

The index of the content part that is done.

item_id instance-attribute

item_id: str

The ID of the output item that the content part was added to.

output_index instance-attribute

output_index: int

The index of the output item that the content part was added to.

part instance-attribute

part: Content

The content part that is done.

sequence_number instance-attribute

sequence_number: int

The sequence number of this event.

type instance-attribute

type: Literal['response.reasoning_part.done']

The type of the event. Always response.reasoning_part.done.
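
Example (a minimal sketch of the event pair emitted around one reasoning part; the shape of the part payload, a reasoning_text type tag plus a text field, is an assumption, and the item id is hypothetical):

from vllm.entrypoints.openai.engine.protocol import (
    ResponseReasoningPartAddedEvent,
    ResponseReasoningPartDoneEvent,
)

added = ResponseReasoningPartAddedEvent(
    content_index=0,
    item_id="rs_123",  # hypothetical output item id
    output_index=0,
    part={"type": "reasoning_text", "text": ""},
    sequence_number=3,
    type="response.reasoning_part.added",
)
done = ResponseReasoningPartDoneEvent(
    content_index=0,
    item_id="rs_123",
    output_index=0,
    part={"type": "reasoning_text", "text": "Step-by-step reasoning ..."},
    sequence_number=9,
    type="response.reasoning_part.done",
)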

ResponseUsage

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponseUsage(OpenAIBaseModel):
    input_tokens: int
    input_tokens_details: InputTokensDetails
    output_tokens: int
    output_tokens_details: OutputTokensDetails
    total_tokens: int

input_tokens instance-attribute

input_tokens: int

input_tokens_details instance-attribute

input_tokens_details: InputTokensDetails

output_tokens instance-attribute

output_tokens: int

output_tokens_details instance-attribute

output_tokens_details: OutputTokensDetails

total_tokens instance-attribute

total_tokens: int
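
Example (a minimal sketch of assembling usage; the cached_tokens field name on InputTokensDetails is assumed):

from vllm.entrypoints.openai.engine.protocol import (
    InputTokensDetails,
    OutputTokensDetails,
    ResponseUsage,
)

usage = ResponseUsage(
    input_tokens=128,
    input_tokens_details=InputTokensDetails(cached_tokens=64),  # assumed field name
    output_tokens=32,
    output_tokens_details=OutputTokensDetails(reasoning_tokens=8),
    total_tokens=160,  # input_tokens + output_tokens
)
print(usage.model_dump_json())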

ResponsesRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponsesRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/responses/create
    background: bool | None = False
    include: (
        list[
            Literal[
                "code_interpreter_call.outputs",
                "computer_call_output.output.image_url",
                "file_search_call.results",
                "message.input_image.image_url",
                "message.output_text.logprobs",
                "reasoning.encrypted_content",
            ],
        ]
        | None
    ) = None
    input: str | list[ResponseInputOutputItem]
    instructions: str | None = None
    max_output_tokens: int | None = None
    max_tool_calls: int | None = None
    metadata: Metadata | None = None
    model: str | None = None
    logit_bias: dict[str, float] | None = None
    parallel_tool_calls: bool | None = True
    previous_response_id: str | None = None
    prompt: ResponsePrompt | None = None
    reasoning: Reasoning | None = None
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] = "auto"
    store: bool | None = True
    stream: bool | None = False
    temperature: float | None = None
    text: ResponseTextConfig | None = None
    tool_choice: ToolChoice = "auto"
    tools: list[Tool] = Field(default_factory=list)
    top_logprobs: int | None = 0
    top_p: float | None = None
    top_k: int | None = None
    truncation: Literal["auto", "disabled"] | None = "disabled"
    user: str | None = None

    # --8<-- [start:responses-extra-params]
    request_id: str = Field(
        default_factory=lambda: f"resp_{random_uuid()}",
        description=(
            "The request_id related to this request. If the caller does "
            "not set it, a random_uuid will be generated. This id is used "
            "through out the inference process and return in response."
        ),
    )
    mm_processor_kwargs: dict[str, Any] | None = Field(
        default=None,
        description=("Additional kwargs to pass to the HF processor."),
    )
    priority: int = Field(
        default=0,
        description=(
            "The priority of the request (lower means earlier handling; "
            "default: 0). Any priority other than 0 will raise an error "
            "if the served model does not use priority scheduling."
        ),
    )
    cache_salt: str | None = Field(
        default=None,
        description=(
            "If specified, the prefix cache will be salted with the provided "
            "string to prevent an attacker to guess prompts in multi-user "
            "environments. The salt should be random, protected from "
            "access by 3rd parties, and long enough to be "
            "unpredictable (e.g., 43 characters base64-encoded, corresponding "
            "to 256 bit)."
        ),
    )

    enable_response_messages: bool = Field(
        default=False,
        description=(
            "Dictates whether or not to return messages as part of the "
            "response object. Currently only supported for"
            "non-background and gpt-oss only. "
        ),
    )
    # similar to input_messages / output_messages in ResponsesResponse
    # we take in previous_input_messages (ie in harmony format)
    # this cannot be used in conjunction with previous_response_id
    # TODO: consider supporting non harmony messages as well
    previous_input_messages: list[OpenAIHarmonyMessage | dict] | None = None
    # --8<-- [end:responses-extra-params]

    _DEFAULT_SAMPLING_PARAMS = {
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
    }

    def to_sampling_params(
        self,
        default_max_tokens: int,
        default_sampling_params: dict | None = None,
    ) -> SamplingParams:
        if self.max_output_tokens is None:
            max_tokens = default_max_tokens
        else:
            max_tokens = min(self.max_output_tokens, default_max_tokens)

        default_sampling_params = default_sampling_params or {}
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        stop_token_ids = default_sampling_params.get("stop_token_ids")

        # Structured output
        structured_outputs = None
        if self.text is not None and self.text.format is not None:
            response_format = self.text.format
            if (
                response_format.type == "json_schema"
                and response_format.schema_ is not None
            ):
                structured_outputs = StructuredOutputsParams(
                    json=response_format.schema_
                )
            elif response_format.type == "json_object":
                raise NotImplementedError("json_object is not supported")

        # TODO: add more parameters
        return SamplingParams.from_optional(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            max_tokens=max_tokens,
            logprobs=self.top_logprobs if self.is_include_output_logprobs() else None,
            stop_token_ids=stop_token_ids,
            output_kind=(
                RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY
            ),
            structured_outputs=structured_outputs,
            logit_bias=self.logit_bias,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    def is_include_output_logprobs(self) -> bool:
        """Check if the request includes output logprobs."""
        if self.include is None:
            return False
        return (
            isinstance(self.include, list)
            and "message.output_text.logprobs" in self.include
        )

    @model_validator(mode="before")
    def validate_background(cls, data):
        if not data.get("background"):
            return data
        if not data.get("store", True):
            raise ValueError("background can only be used when `store` is true")
        return data

    @model_validator(mode="before")
    def validate_prompt(cls, data):
        if data.get("prompt") is not None:
            raise VLLMValidationError(
                "prompt template is not supported", parameter="prompt"
            )
        return data

    @model_validator(mode="before")
    def check_cache_salt_support(cls, data):
        if data.get("cache_salt") is not None and (
            not isinstance(data["cache_salt"], str) or not data["cache_salt"]
        ):
            raise ValueError(
                "Parameter 'cache_salt' must be a non-empty string if provided."
            )
        return data

    @model_validator(mode="before")
    def function_call_parsing(cls, data):
        """Parse function_call dictionaries into ResponseFunctionToolCall objects.
        This ensures Pydantic can properly resolve union types in the input field.
        Function calls provided as dicts are converted to ResponseFunctionToolCall
        objects before validation, while invalid structures are left for Pydantic
        to reject with appropriate error messages.
        """

        input_data = data.get("input")

        # Early return for None, strings, or bytes
        # (strings are iterable but shouldn't be processed)
        if input_data is None or isinstance(input_data, (str, bytes)):
            return data

        # Convert iterators (like ValidatorIterator) to list
        if not isinstance(input_data, list):
            try:
                input_data = list(input_data)
            except TypeError:
                # Not iterable, leave as-is for Pydantic to handle
                return data

        processed_input = []
        for item in input_data:
            if isinstance(item, dict) and item.get("type") == "function_call":
                try:
                    processed_input.append(ResponseFunctionToolCall(**item))
                except ValidationError:
                    # Let Pydantic handle validation for malformed function calls
                    logger.debug(
                        "Failed to parse function_call to ResponseFunctionToolCall, "
                        "leaving for Pydantic validation"
                    )
                    processed_input.append(item)
            else:
                processed_input.append(item)

        data["input"] = processed_input
        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS = {
    "temperature": 1.0,
    "top_p": 1.0,
    "top_k": 0,
}

background class-attribute instance-attribute

background: bool | None = False

cache_salt class-attribute instance-attribute

cache_salt: str | None = Field(
    default=None,
    description="If specified, the prefix cache will be salted with the provided string to prevent an attacker to guess prompts in multi-user environments. The salt should be random, protected from access by 3rd parties, and long enough to be unpredictable (e.g., 43 characters base64-encoded, corresponding to 256 bit).",
)

enable_response_messages class-attribute instance-attribute

enable_response_messages: bool = Field(
    default=False,
    description="Dictates whether or not to return messages as part of the response object. Currently only supported fornon-background and gpt-oss only. ",
)

include class-attribute instance-attribute

include: (
    list[
        Literal[
            "code_interpreter_call.outputs",
            "computer_call_output.output.image_url",
            "file_search_call.results",
            "message.input_image.image_url",
            "message.output_text.logprobs",
            "reasoning.encrypted_content",
        ],
    ]
    | None
) = None

input instance-attribute

input: str | list[ResponseInputOutputItem]

instructions class-attribute instance-attribute

instructions: str | None = None

logit_bias class-attribute instance-attribute

logit_bias: dict[str, float] | None = None

max_output_tokens class-attribute instance-attribute

max_output_tokens: int | None = None

max_tool_calls class-attribute instance-attribute

max_tool_calls: int | None = None

metadata class-attribute instance-attribute

metadata: Metadata | None = None

mm_processor_kwargs class-attribute instance-attribute

mm_processor_kwargs: dict[str, Any] | None = Field(
    default=None,
    description="Additional kwargs to pass to the HF processor.",
)

model class-attribute instance-attribute

model: str | None = None

parallel_tool_calls class-attribute instance-attribute

parallel_tool_calls: bool | None = True

previous_input_messages class-attribute instance-attribute

previous_input_messages: list[Message | dict] | None = None

previous_response_id class-attribute instance-attribute

previous_response_id: str | None = None

priority class-attribute instance-attribute

priority: int = Field(
    default=0,
    description="The priority of the request (lower means earlier handling; default: 0). Any priority other than 0 will raise an error if the served model does not use priority scheduling.",
)

prompt class-attribute instance-attribute

prompt: ResponsePrompt | None = None

reasoning class-attribute instance-attribute

reasoning: Reasoning | None = None

request_id class-attribute instance-attribute

request_id: str = Field(
    default_factory=lambda: f"resp_{random_uuid()}",
    description="The request_id related to this request. If the caller does not set it, a random_uuid will be generated. This id is used through out the inference process and return in response.",
)

service_tier class-attribute instance-attribute

service_tier: Literal[
    "auto", "default", "flex", "scale", "priority"
] = "auto"

store class-attribute instance-attribute

store: bool | None = True

stream class-attribute instance-attribute

stream: bool | None = False

temperature class-attribute instance-attribute

temperature: float | None = None

text class-attribute instance-attribute

text: ResponseFormatTextConfig | None = None

tool_choice class-attribute instance-attribute

tool_choice: ToolChoice = 'auto'

tools class-attribute instance-attribute

tools: list[Tool] = Field(default_factory=list)

top_k class-attribute instance-attribute

top_k: int | None = None

top_logprobs class-attribute instance-attribute

top_logprobs: int | None = 0

top_p class-attribute instance-attribute

top_p: float | None = None

truncation class-attribute instance-attribute

truncation: Literal['auto', 'disabled'] | None = 'disabled'

user class-attribute instance-attribute

user: str | None = None

check_cache_salt_support

check_cache_salt_support(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
def check_cache_salt_support(cls, data):
    if data.get("cache_salt") is not None and (
        not isinstance(data["cache_salt"], str) or not data["cache_salt"]
    ):
        raise ValueError(
            "Parameter 'cache_salt' must be a non-empty string if provided."
        )
    return data

function_call_parsing

function_call_parsing(data)

Parse function_call dictionaries into ResponseFunctionToolCall objects. This ensures Pydantic can properly resolve union types in the input field. Function calls provided as dicts are converted to ResponseFunctionToolCall objects before validation, while invalid structures are left for Pydantic to reject with appropriate error messages.

Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
def function_call_parsing(cls, data):
    """Parse function_call dictionaries into ResponseFunctionToolCall objects.
    This ensures Pydantic can properly resolve union types in the input field.
    Function calls provided as dicts are converted to ResponseFunctionToolCall
    objects before validation, while invalid structures are left for Pydantic
    to reject with appropriate error messages.
    """

    input_data = data.get("input")

    # Early return for None, strings, or bytes
    # (strings are iterable but shouldn't be processed)
    if input_data is None or isinstance(input_data, (str, bytes)):
        return data

    # Convert iterators (like ValidatorIterator) to list
    if not isinstance(input_data, list):
        try:
            input_data = list(input_data)
        except TypeError:
            # Not iterable, leave as-is for Pydantic to handle
            return data

    processed_input = []
    for item in input_data:
        if isinstance(item, dict) and item.get("type") == "function_call":
            try:
                processed_input.append(ResponseFunctionToolCall(**item))
            except ValidationError:
                # Let Pydantic handle validation for malformed function calls
                logger.debug(
                    "Failed to parse function_call to ResponseFunctionToolCall, "
                    "leaving for Pydantic validation"
                )
                processed_input.append(item)
        else:
            processed_input.append(item)

    data["input"] = processed_input
    return data

is_include_output_logprobs

is_include_output_logprobs() -> bool

Check if the request includes output logprobs.

Source code in vllm/entrypoints/openai/engine/protocol.py
def is_include_output_logprobs(self) -> bool:
    """Check if the request includes output logprobs."""
    if self.include is None:
        return False
    return (
        isinstance(self.include, list)
        and "message.output_text.logprobs" in self.include
    )

to_sampling_params

to_sampling_params(
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/engine/protocol.py
def to_sampling_params(
    self,
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams:
    if self.max_output_tokens is None:
        max_tokens = default_max_tokens
    else:
        max_tokens = min(self.max_output_tokens, default_max_tokens)

    default_sampling_params = default_sampling_params or {}
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )
    if (top_p := self.top_p) is None:
        top_p = default_sampling_params.get(
            "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
        )
    if (top_k := self.top_k) is None:
        top_k = default_sampling_params.get(
            "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
        )
    stop_token_ids = default_sampling_params.get("stop_token_ids")

    # Structured output
    structured_outputs = None
    if self.text is not None and self.text.format is not None:
        response_format = self.text.format
        if (
            response_format.type == "json_schema"
            and response_format.schema_ is not None
        ):
            structured_outputs = StructuredOutputsParams(
                json=response_format.schema_
            )
        elif response_format.type == "json_object":
            raise NotImplementedError("json_object is not supported")

    # TODO: add more parameters
    return SamplingParams.from_optional(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        max_tokens=max_tokens,
        logprobs=self.top_logprobs if self.is_include_output_logprobs() else None,
        stop_token_ids=stop_token_ids,
        output_kind=(
            RequestOutputKind.DELTA if self.stream else RequestOutputKind.FINAL_ONLY
        ),
        structured_outputs=structured_outputs,
        logit_bias=self.logit_bias,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )

validate_background

validate_background(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
def validate_background(cls, data):
    if not data.get("background"):
        return data
    if not data.get("store", True):
        raise ValueError("background can only be used when `store` is true")
    return data

validate_prompt

validate_prompt(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
def validate_prompt(cls, data):
    if data.get("prompt") is not None:
        raise VLLMValidationError(
            "prompt template is not supported", parameter="prompt"
        )
    return data
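
Example (a minimal sketch of how a ResponsesRequest maps onto SamplingParams; the model name is hypothetical, and in the server the request is normally built from the JSON body rather than constructed directly):

from vllm.entrypoints.openai.engine.protocol import ResponsesRequest

req = ResponsesRequest(
    input="Summarize the latest server logs.",
    model="my-model",  # hypothetical served model name
    max_output_tokens=256,
    temperature=0.2,
    include=["message.output_text.logprobs"],
    top_logprobs=5,
)

params = req.to_sampling_params(
    default_max_tokens=1024,  # cap imposed by the server
    default_sampling_params={"top_k": 0},
)
assert params.max_tokens == 256          # min(max_output_tokens, default_max_tokens)
assert req.is_include_output_logprobs()  # logprobs requested via `include`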

ResponsesResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ResponsesResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"resp_{random_uuid()}")
    created_at: int = Field(default_factory=lambda: int(time.time()))
    # error: Optional[ResponseError] = None
    incomplete_details: IncompleteDetails | None = None
    instructions: str | None = None
    metadata: Metadata | None = None
    model: str
    object: Literal["response"] = "response"
    output: list[ResponseOutputItem]
    parallel_tool_calls: bool
    temperature: float
    tool_choice: ToolChoice
    tools: list[Tool]
    top_p: float
    background: bool
    max_output_tokens: int
    max_tool_calls: int | None = None
    previous_response_id: str | None = None
    prompt: ResponsePrompt | None = None
    reasoning: Reasoning | None = None
    service_tier: Literal["auto", "default", "flex", "scale", "priority"]
    status: ResponseStatus
    text: ResponseTextConfig | None = None
    top_logprobs: int | None = None
    truncation: Literal["auto", "disabled"]
    usage: ResponseUsage | None = None
    user: str | None = None

    # --8<-- [start:responses-response-extra-params]
    # These are populated when enable_response_messages is set to True
    # NOTE: custom serialization is needed
    # see serialize_input_messages and serialize_output_messages
    input_messages: ResponseInputOutputMessage | None = Field(
        default=None,
        description=(
            "If enable_response_messages, we can show raw token input to model."
        ),
    )
    output_messages: ResponseInputOutputMessage | None = Field(
        default=None,
        description=(
            "If enable_response_messages, we can show raw token output of model."
        ),
    )
    # --8<-- [end:responses-response-extra-params]

    # NOTE: openAI harmony doesn't serialize TextContent properly,
    # TODO: this fixes for TextContent, but need to verify for tools etc
    # https://github.com/openai/harmony/issues/78
    @field_serializer("output_messages", when_used="json")
    def serialize_output_messages(self, msgs, _info):
        return serialize_messages(msgs)

    # NOTE: openAI harmony doesn't serialize TextContent properly, this fixes it
    # https://github.com/openai/harmony/issues/78
    @field_serializer("input_messages", when_used="json")
    def serialize_input_messages(self, msgs, _info):
        return serialize_messages(msgs)

    @classmethod
    def from_request(
        cls,
        request: ResponsesRequest,
        sampling_params: SamplingParams,
        model_name: str,
        created_time: int,
        output: list[ResponseOutputItem],
        status: ResponseStatus,
        usage: ResponseUsage | None = None,
        input_messages: ResponseInputOutputMessage | None = None,
        output_messages: ResponseInputOutputMessage | None = None,
    ) -> "ResponsesResponse":
        incomplete_details: IncompleteDetails | None = None
        if status == "incomplete":
            incomplete_details = IncompleteDetails(reason="max_output_tokens")
        # TODO: implement the other reason for incomplete_details,
        # which is content_filter
        # incomplete_details = IncompleteDetails(reason='content_filter')
        return cls(
            id=request.request_id,
            created_at=created_time,
            incomplete_details=incomplete_details,
            instructions=request.instructions,
            metadata=request.metadata,
            model=model_name,
            output=output,
            input_messages=input_messages,
            output_messages=output_messages,
            parallel_tool_calls=request.parallel_tool_calls,
            temperature=sampling_params.temperature,
            tool_choice=request.tool_choice,
            tools=request.tools,
            top_p=sampling_params.top_p,
            background=request.background,
            max_output_tokens=sampling_params.max_tokens,
            max_tool_calls=request.max_tool_calls,
            previous_response_id=request.previous_response_id,
            prompt=request.prompt,
            reasoning=request.reasoning,
            service_tier=request.service_tier,
            status=status,
            text=request.text,
            top_logprobs=sampling_params.logprobs,
            truncation=request.truncation,
            user=request.user,
            usage=usage,
        )

background instance-attribute

background: bool

created_at class-attribute instance-attribute

created_at: int = Field(default_factory=lambda: int(time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"resp_{random_uuid()}"
)

incomplete_details class-attribute instance-attribute

incomplete_details: IncompleteDetails | None = None

input_messages class-attribute instance-attribute

input_messages: ResponseInputOutputMessage | None = Field(
    default=None,
    description="If enable_response_messages, we can show raw token input to model.",
)

instructions class-attribute instance-attribute

instructions: str | None = None

max_output_tokens instance-attribute

max_output_tokens: int

max_tool_calls class-attribute instance-attribute

max_tool_calls: int | None = None

metadata class-attribute instance-attribute

metadata: Metadata | None = None

model instance-attribute

model: str

object class-attribute instance-attribute

object: Literal['response'] = 'response'

output instance-attribute

output: list[ResponseOutputItem]

output_messages class-attribute instance-attribute

output_messages: ResponseInputOutputMessage | None = Field(
    default=None,
    description="If enable_response_messages, we can show raw token output of model.",
)

parallel_tool_calls instance-attribute

parallel_tool_calls: bool

previous_response_id class-attribute instance-attribute

previous_response_id: str | None = None

prompt class-attribute instance-attribute

prompt: ResponsePrompt | None = None

reasoning class-attribute instance-attribute

reasoning: Reasoning | None = None

service_tier instance-attribute

service_tier: Literal[
    "auto", "default", "flex", "scale", "priority"
]

status instance-attribute

status: ResponseStatus

temperature instance-attribute

temperature: float

text class-attribute instance-attribute

text: ResponseFormatTextConfig | None = None

tool_choice instance-attribute

tool_choice: ToolChoice

tools instance-attribute

tools: list[Tool]

top_logprobs class-attribute instance-attribute

top_logprobs: int | None = None

top_p instance-attribute

top_p: float

truncation instance-attribute

truncation: Literal['auto', 'disabled']

usage class-attribute instance-attribute

usage: ResponseUsage | None = None

user class-attribute instance-attribute

user: str | None = None

from_request classmethod

from_request(
    request: ResponsesRequest,
    sampling_params: SamplingParams,
    model_name: str,
    created_time: int,
    output: list[ResponseOutputItem],
    status: ResponseStatus,
    usage: ResponseUsage | None = None,
    input_messages: ResponseInputOutputMessage
    | None = None,
    output_messages: ResponseInputOutputMessage
    | None = None,
) -> ResponsesResponse
Source code in vllm/entrypoints/openai/engine/protocol.py
@classmethod
def from_request(
    cls,
    request: ResponsesRequest,
    sampling_params: SamplingParams,
    model_name: str,
    created_time: int,
    output: list[ResponseOutputItem],
    status: ResponseStatus,
    usage: ResponseUsage | None = None,
    input_messages: ResponseInputOutputMessage | None = None,
    output_messages: ResponseInputOutputMessage | None = None,
) -> "ResponsesResponse":
    incomplete_details: IncompleteDetails | None = None
    if status == "incomplete":
        incomplete_details = IncompleteDetails(reason="max_output_tokens")
    # TODO: implement the other reason for incomplete_details,
    # which is content_filter
    # incomplete_details = IncompleteDetails(reason='content_filter')
    return cls(
        id=request.request_id,
        created_at=created_time,
        incomplete_details=incomplete_details,
        instructions=request.instructions,
        metadata=request.metadata,
        model=model_name,
        output=output,
        input_messages=input_messages,
        output_messages=output_messages,
        parallel_tool_calls=request.parallel_tool_calls,
        temperature=sampling_params.temperature,
        tool_choice=request.tool_choice,
        tools=request.tools,
        top_p=sampling_params.top_p,
        background=request.background,
        max_output_tokens=sampling_params.max_tokens,
        max_tool_calls=request.max_tool_calls,
        previous_response_id=request.previous_response_id,
        prompt=request.prompt,
        reasoning=request.reasoning,
        service_tier=request.service_tier,
        status=status,
        text=request.text,
        top_logprobs=sampling_params.logprobs,
        truncation=request.truncation,
        user=request.user,
        usage=usage,
    )

serialize_input_messages

serialize_input_messages(msgs, _info)
Source code in vllm/entrypoints/openai/engine/protocol.py
@field_serializer("input_messages", when_used="json")
def serialize_input_messages(self, msgs, _info):
    return serialize_messages(msgs)

serialize_output_messages

serialize_output_messages(msgs, _info)
Source code in vllm/entrypoints/openai/engine/protocol.py
@field_serializer("output_messages", when_used="json")
def serialize_output_messages(self, msgs, _info):
    return serialize_messages(msgs)
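
Example (a rough sketch of how the server assembles a ResponsesResponse once generation finishes; the output list is left empty for brevity and the model name is hypothetical):

import time

from vllm.entrypoints.openai.engine.protocol import (
    ResponsesRequest,
    ResponsesResponse,
)

req = ResponsesRequest(input="ping", model="my-model")  # hypothetical model name
params = req.to_sampling_params(default_max_tokens=64)

resp = ResponsesResponse.from_request(
    request=req,
    sampling_params=params,
    model_name="my-model",
    created_time=int(time.time()),
    output=[],  # normally the generated ResponseOutputItem list
    status="completed",
)
print(resp.status, resp.max_output_tokens)  # completed 64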

StreamOptions

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class StreamOptions(OpenAIBaseModel):
    include_usage: bool | None = True
    continuous_usage_stats: bool | None = False

continuous_usage_stats class-attribute instance-attribute

continuous_usage_stats: bool | None = False

include_usage class-attribute instance-attribute

include_usage: bool | None = True

StructuralTagResponseFormat

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class StructuralTagResponseFormat(OpenAIBaseModel):
    type: Literal["structural_tag"]
    format: Any

format instance-attribute

format: Any

type instance-attribute

type: Literal['structural_tag']

ToolCall

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class ToolCall(OpenAIBaseModel):
    id: str = Field(default_factory=make_tool_call_id)
    type: Literal["function"] = "function"
    function: FunctionCall

function instance-attribute

function: FunctionCall

id class-attribute instance-attribute

id: str = Field(default_factory=make_tool_call_id)

type class-attribute instance-attribute

type: Literal['function'] = 'function'
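
Example (a minimal sketch; FunctionCall is assumed to carry the usual OpenAI-style name and JSON-encoded arguments fields, and the tool name is hypothetical):

import json

from vllm.entrypoints.openai.engine.protocol import FunctionCall, ToolCall

call = ToolCall(
    function=FunctionCall(
        name="get_weather",  # hypothetical tool name
        arguments=json.dumps({"city": "Paris"}),
    )
)
# `id` is generated by make_tool_call_id(); `type` is always "function".
print(call.model_dump_json())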

TranscriptionRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/audio/createTranscription

    file: UploadFile
    """
    The audio file object (not file name) to transcribe, in one of these
    formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: str | None = None
    """ID of the model to use.
    """

    language: str | None = None
    """The language of the input audio.

    Supplying the input language in
    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
    will improve accuracy and latency.
    """

    prompt: str = Field(default="")
    """An optional text to guide the model's style or continue a previous audio
    segment.

    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
    should match the audio language.
    """

    response_format: AudioResponseFormat = Field(default="json")
    """
    The format of the output, in one of these options: `json`, `text`, `srt`,
    `verbose_json`, or `vtt`.
    """

    ## TODO (varun) : Support if set to 0, certain thresholds are met !!

    timestamp_granularities: list[Literal["word", "segment"]] = Field(
        alias="timestamp_granularities[]", default=[]
    )
    """The timestamp granularities to populate for this transcription.

    `response_format` must be set to `verbose_json` to use timestamp granularities.
    Either or both of these options are supported: `word`, or `segment`. Note:
    There is no additional latency for segment timestamps, but generating word
    timestamps incurs additional latency.
    """

    stream: bool | None = False
    """When set, it will enable output to be streamed in a similar fashion
    as the Chat Completion endpoint.
    """
    # --8<-- [start:transcription-extra-params]
    # Flattened stream option to simplify form data.
    stream_include_usage: bool | None = False
    stream_continuous_usage_stats: bool | None = False

    vllm_xargs: dict[str, str | int | float] | None = Field(
        default=None,
        description=(
            "Additional request parameters with string or "
            "numeric values, used by custom extensions."
        ),
    )
    # --8<-- [end:transcription-extra-params]

    to_language: str | None = None
    """The language of the output audio we transcribe to.

    Note that this is not currently used by supported models; it is a
    placeholder for future use, matching the translation API.
    """

    # --8<-- [start:transcription-sampling-params]
    temperature: float = Field(default=0.0)
    """The sampling temperature, between 0 and 1.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused / deterministic. If set to 0, the model
    will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
    to automatically increase the temperature until certain thresholds are hit.
    """

    top_p: float | None = None
    """Enables nucleus (top-p) sampling, where tokens are selected from the
    smallest possible set whose cumulative probability exceeds `p`.
    """

    top_k: int | None = None
    """Limits sampling to the `k` most probable tokens at each step."""

    min_p: float | None = None
    """Filters out tokens with a probability lower than `min_p`, ensuring a
    minimum likelihood threshold during sampling.
    """

    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    """The seed to use for sampling."""

    frequency_penalty: float | None = 0.0
    """The frequency penalty to use for sampling."""

    repetition_penalty: float | None = None
    """The repetition penalty to use for sampling."""

    presence_penalty: float | None = 0.0
    """The presence penalty to use for sampling."""

    max_completion_tokens: int | None = None
    """The maximum number of tokens to generate."""
    # --8<-- [end:transcription-sampling-params]

    # Default sampling parameters for transcription requests.
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "repetition_penalty": 1.0,
        "temperature": 1.0,
        "top_p": 1.0,
        "top_k": 0,
        "min_p": 0.0,
    }

    def to_sampling_params(
        self, default_max_tokens: int, default_sampling_params: dict | None = None
    ) -> SamplingParams:
        max_tokens = default_max_tokens

        if default_sampling_params is None:
            default_sampling_params = {}

        # Default parameters
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )
        if (top_p := self.top_p) is None:
            top_p = default_sampling_params.get(
                "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
            )
        if (top_k := self.top_k) is None:
            top_k = default_sampling_params.get(
                "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
            )
        if (min_p := self.min_p) is None:
            min_p = default_sampling_params.get(
                "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
            )

        if (repetition_penalty := self.repetition_penalty) is None:
            repetition_penalty = default_sampling_params.get(
                "repetition_penalty",
                self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
            )

        return SamplingParams.from_optional(
            temperature=temperature,
            max_tokens=max_tokens,
            seed=self.seed,
            top_p=top_p,
            top_k=top_k,
            min_p=min_p,
            frequency_penalty=self.frequency_penalty,
            repetition_penalty=repetition_penalty,
            presence_penalty=self.presence_penalty,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            extra_args=self.vllm_xargs,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def validate_transcription_request(cls, data):
        if isinstance(data.get("file"), str):
            raise HTTPException(
                status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
                detail="Expected 'file' to be a file-like object, not 'str'.",
            )

        stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
        stream = data.get("stream", False)
        if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
            # Find which specific stream option was set
            invalid_param = next(
                (so for so in stream_opts if data.get(so, False)),
                "stream_include_usage",
            )
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter=invalid_param,
            )

        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {
    "repetition_penalty": 1.0,
    "temperature": 1.0,
    "top_p": 1.0,
    "top_k": 0,
    "min_p": 0.0,
}

file instance-attribute

file: UploadFile

The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

frequency_penalty class-attribute instance-attribute

frequency_penalty: float | None = 0.0

The frequency penalty to use for sampling.

language class-attribute instance-attribute

language: str | None = None

The language of the input audio.

Supplying the input language in ISO-639-1 format will improve accuracy and latency.

max_completion_tokens class-attribute instance-attribute

max_completion_tokens: int | None = None

The maximum number of tokens to generate.

min_p class-attribute instance-attribute

min_p: float | None = None

Filters out tokens with a probability lower than min_p, ensuring a minimum likelihood threshold during sampling.

model class-attribute instance-attribute

model: str | None = None

ID of the model to use.

presence_penalty class-attribute instance-attribute

presence_penalty: float | None = 0.0

The presence penalty to use for sampling.

prompt class-attribute instance-attribute

prompt: str = Field(default='')

An optional text to guide the model's style or continue a previous audio segment.

The prompt should match the audio language.

repetition_penalty class-attribute instance-attribute

repetition_penalty: float | None = None

The repetition penalty to use for sampling.

response_format class-attribute instance-attribute

response_format: AudioResponseFormat = Field(default="json")

The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.

seed class-attribute instance-attribute

seed: int | None = Field(None, ge=min, le=max)

The seed to use for sampling.

stream class-attribute instance-attribute

stream: bool | None = False

When set, it will enable output to be streamed in a similar fashion as the Chat Completion endpoint.

stream_continuous_usage_stats class-attribute instance-attribute

stream_continuous_usage_stats: bool | None = False

stream_include_usage class-attribute instance-attribute

stream_include_usage: bool | None = False

temperature class-attribute instance-attribute

temperature: float = Field(default=0.0)

The sampling temperature, between 0 and 1.

Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused / deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.

timestamp_granularities class-attribute instance-attribute

timestamp_granularities: list[
    Literal["word", "segment"]
] = Field(alias="timestamp_granularities[]", default=[])

The timestamp granularities to populate for this transcription.

response_format must be set to verbose_json to use timestamp granularities. Either or both of these options are supported: word, or segment. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency.

to_language class-attribute instance-attribute

to_language: str | None = None

The language of the output audio we transcribe to.

Note that this is not currently used by supported models; it is a placeholder for future use, matching the translation API.

top_k class-attribute instance-attribute

top_k: int | None = None

Limits sampling to the k most probable tokens at each step.

top_p class-attribute instance-attribute

top_p: float | None = None

Enables nucleus (top-p) sampling, where tokens are selected from the smallest possible set whose cumulative probability exceeds p.

vllm_xargs class-attribute instance-attribute

vllm_xargs: dict[str, str | int | float] | None = Field(
    default=None,
    description="Additional request parameters with string or numeric values, used by custom extensions.",
)

to_sampling_params

to_sampling_params(
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/engine/protocol.py
def to_sampling_params(
    self, default_max_tokens: int, default_sampling_params: dict | None = None
) -> SamplingParams:
    max_tokens = default_max_tokens

    if default_sampling_params is None:
        default_sampling_params = {}

    # Default parameters
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )
    if (top_p := self.top_p) is None:
        top_p = default_sampling_params.get(
            "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"]
        )
    if (top_k := self.top_k) is None:
        top_k = default_sampling_params.get(
            "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"]
        )
    if (min_p := self.min_p) is None:
        min_p = default_sampling_params.get(
            "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"]
        )

    if (repetition_penalty := self.repetition_penalty) is None:
        repetition_penalty = default_sampling_params.get(
            "repetition_penalty",
            self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
        )

    return SamplingParams.from_optional(
        temperature=temperature,
        max_tokens=max_tokens,
        seed=self.seed,
        top_p=top_p,
        top_k=top_k,
        min_p=min_p,
        frequency_penalty=self.frequency_penalty,
        repetition_penalty=repetition_penalty,
        presence_penalty=self.presence_penalty,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        extra_args=self.vllm_xargs,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )

validate_transcription_request classmethod

validate_transcription_request(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def validate_transcription_request(cls, data):
    if isinstance(data.get("file"), str):
        raise HTTPException(
            status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
            detail="Expected 'file' to be a file-like object, not 'str'.",
        )

    stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
    stream = data.get("stream", False)
    if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
        # Find which specific stream option was set
        invalid_param = next(
            (so for so in stream_opts if data.get(so, False)),
            "stream_include_usage",
        )
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter=invalid_param,
        )

    return data
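
Example (a rough sketch of turning a TranscriptionRequest into SamplingParams; constructing UploadFile from an in-memory buffer is an assumption for illustration, since the server receives the file as multipart form data, and the model name is hypothetical):

from io import BytesIO

from fastapi import UploadFile

from vllm.entrypoints.openai.engine.protocol import TranscriptionRequest

req = TranscriptionRequest(
    file=UploadFile(file=BytesIO(b"\x00\x00"), filename="clip.wav"),
    model="whisper-large-v3",  # hypothetical served model name
    language="en",
    response_format="verbose_json",
    temperature=0.0,
)

params = req.to_sampling_params(default_max_tokens=440)
# temperature=0.0 is kept as-is; top_p/top_k/min_p fall back to the defaults.
print(params.temperature, params.max_tokens)  # e.g. 0.0 440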

TranscriptionResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionResponse(OpenAIBaseModel):
    text: str
    """The transcribed text."""
    usage: TranscriptionUsageAudio

text instance-attribute

text: str

The transcribed text.

usage instance-attribute

usage: TranscriptionUsageAudio

TranscriptionResponseStreamChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionResponseStreamChoice(OpenAIBaseModel):
    delta: DeltaMessage
    finish_reason: str | None = None
    stop_reason: int | str | None = None

delta instance-attribute

delta: DeltaMessage

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = None

TranscriptionResponseVerbose

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionResponseVerbose(OpenAIBaseModel):
    duration: str
    """The duration of the input audio."""

    language: str
    """The language of the input audio."""

    text: str
    """The transcribed text."""

    segments: list[TranscriptionSegment] | None = None
    """Segments of the transcribed text and their corresponding details."""

    words: list[TranscriptionWord] | None = None
    """Extracted words and their corresponding timestamps."""

duration instance-attribute

duration: str

The duration of the input audio.

language instance-attribute

language: str

The language of the input audio.

segments class-attribute instance-attribute

segments: list[TranscriptionSegment] | None = None

Segments of the transcribed text and their corresponding details.

text instance-attribute

text: str

The transcribed text.

words class-attribute instance-attribute

words: list[TranscriptionWord] | None = None

Extracted words and their corresponding timestamps.

TranscriptionSegment

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionSegment(OpenAIBaseModel):
    id: int
    """Unique identifier of the segment."""

    avg_logprob: float | None = None
    """Average logprob of the segment.

    If the value is lower than -1, consider the logprobs failed.
    """

    compression_ratio: float | None = None
    """Compression ratio of the segment.

    If the value is greater than 2.4, consider the compression failed.
    """

    end: float
    """End time of the segment in seconds."""

    no_speech_prob: float | None = None
    """Probability of no speech in the segment.

    If the value is higher than 1.0 and the `avg_logprob` is below -1, consider
    this segment silent.
    """

    seek: int
    """Seek offset of the segment."""

    start: float
    """Start time of the segment in seconds."""

    temperature: float
    """Temperature parameter used for generating the segment."""

    text: str
    """Text content of the segment."""

    tokens: list[int]
    """Array of token IDs for the text content."""

avg_logprob class-attribute instance-attribute

avg_logprob: float | None = None

Average logprob of the segment.

If the value is lower than -1, consider the logprobs failed.

compression_ratio class-attribute instance-attribute

compression_ratio: float | None = None

Compression ratio of the segment.

If the value is greater than 2.4, consider the compression failed.

end instance-attribute

end: float

End time of the segment in seconds.

id instance-attribute

id: int

Unique identifier of the segment.

no_speech_prob class-attribute instance-attribute

no_speech_prob: float | None = None

Probability of no speech in the segment.

If the value is higher than 1.0 and the avg_logprob is below -1, consider this segment silent.

seek instance-attribute

seek: int

Seek offset of the segment.

start instance-attribute

start: float

Start time of the segment in seconds.

temperature instance-attribute

temperature: float

Temperature parameter used for generating the segment.

text instance-attribute

text: str

Text content of the segment.

tokens instance-attribute

tokens: list[int]

Array of token IDs for the text content.

TranscriptionStreamResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionStreamResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"trsc-{random_uuid()}")
    object: Literal["transcription.chunk"] = "transcription.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[TranscriptionResponseStreamChoice]
    usage: UsageInfo | None = Field(default=None)

choices instance-attribute

choices: list[TranscriptionResponseStreamChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"trsc-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: Literal["transcription.chunk"] = (
    "transcription.chunk"
)

usage class-attribute instance-attribute

usage: UsageInfo | None = Field(default=None)

TranscriptionUsageAudio

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionUsageAudio(OpenAIBaseModel):
    type: Literal["duration"] = "duration"
    seconds: int

seconds instance-attribute

seconds: int

type class-attribute instance-attribute

type: Literal['duration'] = 'duration'

TranscriptionWord

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranscriptionWord(OpenAIBaseModel):
    end: float
    """End time of the word in seconds."""

    start: float
    """Start time of the word in seconds."""

    word: str
    """The text content of the word."""

end instance-attribute

end: float

End time of the word in seconds.

start instance-attribute

start: float

Start time of the word in seconds.

word instance-attribute

word: str

The text content of the word.

TranslationRequest

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationRequest(OpenAIBaseModel):
    # Ordered by official OpenAI API documentation
    # https://platform.openai.com/docs/api-reference/audio/createTranslation

    file: UploadFile
    """
    The audio file object (not file name) to translate, in one of these
    formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
    """

    model: str | None = None
    """ID of the model to use.
    """

    prompt: str = Field(default="")
    """An optional text to guide the model's style or continue a previous audio
    segment.

    The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
    should match the audio language.
    """

    response_format: AudioResponseFormat = Field(default="json")
    """
    The format of the output, in one of these options: `json`, `text`, `srt`,
    `verbose_json`, or `vtt`.
    """

    # TODO support additional sampling parameters
    # --8<-- [start:translation-sampling-params]
    seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
    """The seed to use for sampling."""

    temperature: float = Field(default=0.0)
    """The sampling temperature, between 0 and 1.

    Higher values like 0.8 will make the output more random, while lower values
    like 0.2 will make it more focused / deterministic. If set to 0, the model
    will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
    to automatically increase the temperature until certain thresholds are hit.
    """
    # --8<-- [end:translation-sampling-params]

    # --8<-- [start:translation-extra-params]
    language: str | None = None
    """The language of the input audio we translate from.

    Supplying the input language in
    [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
    will improve accuracy.
    """

    to_language: str | None = None
    """The language of the input audio we translate to.

    Please note that this is not supported by all models, refer to the specific
    model documentation for more details.
    For instance, Whisper only supports `to_language=en`.
    """

    stream: bool | None = False
    """Custom field not present in the original OpenAI definition. When set,
    it will enable output to be streamed in a similar fashion as the Chat
    Completion endpoint.
    """
    # Flattened stream option to simplify form data.
    stream_include_usage: bool | None = False
    stream_continuous_usage_stats: bool | None = False

    max_completion_tokens: int | None = None
    """The maximum number of tokens to generate."""
    # --8<-- [end:translation-extra-params]

    # Default sampling parameters for translation requests.
    _DEFAULT_SAMPLING_PARAMS: dict = {
        "temperature": 0,
    }

    def to_sampling_params(
        self, default_max_tokens: int, default_sampling_params: dict | None = None
    ) -> SamplingParams:
        max_tokens = default_max_tokens

        if default_sampling_params is None:
            default_sampling_params = {}
        # Default parameters
        if (temperature := self.temperature) is None:
            temperature = default_sampling_params.get(
                "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
            )

        return SamplingParams.from_optional(
            temperature=temperature,
            max_tokens=max_tokens,
            seed=self.seed,
            output_kind=RequestOutputKind.DELTA
            if self.stream
            else RequestOutputKind.FINAL_ONLY,
            skip_clone=True,  # Created fresh per request, safe to skip clone
        )

    @model_validator(mode="before")
    @classmethod
    def validate_stream_options(cls, data):
        stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
        stream = data.get("stream", False)
        if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
            # Find which specific stream option was set
            invalid_param = next(
                (so for so in stream_opts if data.get(so, False)),
                "stream_include_usage",
            )
            raise VLLMValidationError(
                "Stream options can only be defined when `stream=True`.",
                parameter=invalid_param,
            )

        return data

_DEFAULT_SAMPLING_PARAMS class-attribute instance-attribute

_DEFAULT_SAMPLING_PARAMS: dict = {'temperature': 0}

file instance-attribute

file: UploadFile

The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.

language class-attribute instance-attribute

language: str | None = None

The language of the input audio we translate from.

Supplying the input language in ISO-639-1 format will improve accuracy.

max_completion_tokens class-attribute instance-attribute

max_completion_tokens: int | None = None

The maximum number of tokens to generate.

model class-attribute instance-attribute

model: str | None = None

ID of the model to use.

prompt class-attribute instance-attribute

prompt: str = Field(default='')

An optional text to guide the model's style or continue a previous audio segment.

The prompt should match the audio language.

response_format class-attribute instance-attribute

response_format: AudioResponseFormat = Field(default="json")

The format of the output, in one of these options: json, text, srt, verbose_json, or vtt.

seed class-attribute instance-attribute

seed: int | None = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)

The seed to use for sampling.

stream class-attribute instance-attribute

stream: bool | None = False

Custom field not present in the original OpenAI definition. When set, it will enable output to be streamed in a similar fashion as the Chat Completion endpoint.

stream_continuous_usage_stats class-attribute instance-attribute

stream_continuous_usage_stats: bool | None = False

stream_include_usage class-attribute instance-attribute

stream_include_usage: bool | None = False

temperature class-attribute instance-attribute

temperature: float = Field(default=0.0)

The sampling temperature, between 0 and 1.

Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused / deterministic. If set to 0, the model will use log probability to automatically increase the temperature until certain thresholds are hit.

to_language class-attribute instance-attribute

to_language: str | None = None

The target language to translate the audio into.

Note that not all models support this; refer to the specific model's documentation for details. For instance, Whisper only supports to_language=en.

to_sampling_params

to_sampling_params(
    default_max_tokens: int,
    default_sampling_params: dict | None = None,
) -> SamplingParams
Source code in vllm/entrypoints/openai/engine/protocol.py
def to_sampling_params(
    self, default_max_tokens: int, default_sampling_params: dict | None = None
) -> SamplingParams:
    max_tokens = default_max_tokens

    if default_sampling_params is None:
        default_sampling_params = {}
    # Default parameters
    if (temperature := self.temperature) is None:
        temperature = default_sampling_params.get(
            "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]
        )

    return SamplingParams.from_optional(
        temperature=temperature,
        max_tokens=max_tokens,
        seed=self.seed,
        output_kind=RequestOutputKind.DELTA
        if self.stream
        else RequestOutputKind.FINAL_ONLY,
        skip_clone=True,  # Created fresh per request, safe to skip clone
    )
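
A minimal usage sketch of how a serving layer might derive SamplingParams from a request. The model name, audio payload, and default_max_tokens value are illustrative placeholders, not defaults of any deployment:

import io

from fastapi import UploadFile

from vllm.entrypoints.openai.engine.protocol import TranslationRequest

# Dummy in-memory audio payload; a real request carries an uploaded file.
dummy_audio = UploadFile(file=io.BytesIO(b"..."), filename="clip.wav")

request = TranslationRequest(
    file=dummy_audio,
    model="openai/whisper-large-v3",
    language="fr",
    seed=42,
    temperature=0.0,
)

# default_max_tokens is normally computed by the server from the model's
# context length; 256 here is only a placeholder.
params = request.to_sampling_params(default_max_tokens=256)
print(params.temperature, params.max_tokens, params.seed)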

validate_stream_options classmethod

validate_stream_options(data)
Source code in vllm/entrypoints/openai/engine/protocol.py
@model_validator(mode="before")
@classmethod
def validate_stream_options(cls, data):
    stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"]
    stream = data.get("stream", False)
    if any(bool(data.get(so, False)) for so in stream_opts) and not stream:
        # Find which specific stream option was set
        invalid_param = next(
            (so for so in stream_opts if data.get(so, False)),
            "stream_include_usage",
        )
        raise VLLMValidationError(
            "Stream options can only be defined when `stream=True`.",
            parameter=invalid_param,
        )

    return data
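
A sketch of the constraint this validator enforces: the flattened stream options are only accepted alongside stream=True. The audio payload is a dummy placeholder:

import io

from fastapi import UploadFile

from vllm.entrypoints.openai.engine.protocol import TranslationRequest

dummy_audio = UploadFile(file=io.BytesIO(b"..."), filename="clip.wav")

# Rejected: stream defaults to False, so usage options are not allowed.
try:
    TranslationRequest(file=dummy_audio, stream_include_usage=True)
except Exception as exc:  # exact type depends on how pydantic wraps VLLMValidationError
    print(type(exc).__name__, exc)

# Accepted: usage reporting only makes sense on a streaming response.
TranslationRequest(file=dummy_audio, stream=True, stream_include_usage=True)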

TranslationResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationResponse(OpenAIBaseModel):
    text: str
    """The translated text."""

text instance-attribute

text: str

The translated text.

TranslationResponseStreamChoice

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationResponseStreamChoice(OpenAIBaseModel):
    delta: DeltaMessage
    finish_reason: str | None = None
    stop_reason: int | str | None = None

delta instance-attribute

delta: DeltaMessage

finish_reason class-attribute instance-attribute

finish_reason: str | None = None

stop_reason class-attribute instance-attribute

stop_reason: int | str | None = None

TranslationResponseVerbose

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationResponseVerbose(OpenAIBaseModel):
    duration: str
    """The duration of the input audio."""

    language: str
    """The language of the input audio."""

    text: str
    """The translated text."""

    segments: list[TranslationSegment] | None = None
    """Segments of the translated text and their corresponding details."""

    words: list[TranslationWord] | None = None
    """Extracted words and their corresponding timestamps."""

duration instance-attribute

duration: str

The duration of the input audio.

language instance-attribute

language: str

The language of the input audio.

segments class-attribute instance-attribute

segments: list[TranslationSegment] | None = None

Segments of the translated text and their corresponding details.

text instance-attribute

text: str

The translated text.

words class-attribute instance-attribute

words: list[TranslationWord] | None = None

Extracted words and their corresponding timestamps.
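
Illustrative construction of a verbose response; the timestamps, token IDs, and scores below are made up:

from vllm.entrypoints.openai.engine.protocol import (
    TranslationResponseVerbose,
    TranslationSegment,
    TranslationWord,
)

response = TranslationResponseVerbose(
    duration="3.2",
    language="en",
    text="Hello world",
    segments=[
        TranslationSegment(
            id=0,
            seek=0,
            start=0.0,
            end=3.2,
            text="Hello world",
            tokens=[50364, 2425, 1002],
            temperature=0.0,
            avg_logprob=-0.21,
            compression_ratio=1.1,
            no_speech_prob=0.02,
        )
    ],
    words=[TranslationWord(start=0.0, end=1.1, word="Hello")],
)
print(response.model_dump_json(exclude_none=True))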

TranslationSegment

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationSegment(OpenAIBaseModel):
    id: int
    """Unique identifier of the segment."""

    avg_logprob: float | None = None
    """Average logprob of the segment.

    If the value is lower than -1, consider the logprobs failed.
    """

    compression_ratio: float | None = None
    """Compression ratio of the segment.

    If the value is greater than 2.4, consider the compression failed.
    """

    end: float
    """End time of the segment in seconds."""

    no_speech_prob: float | None = None
    """Probability of no speech in the segment.

    If the value is higher than 1.0 and the `avg_logprob` is below -1, consider
    this segment silent.
    """

    seek: int
    """Seek offset of the segment."""

    start: float
    """Start time of the segment in seconds."""

    temperature: float
    """Temperature parameter used for generating the segment."""

    text: str
    """Text content of the segment."""

    tokens: list[int]
    """Array of token IDs for the text content."""

avg_logprob class-attribute instance-attribute

avg_logprob: float | None = None

Average logprob of the segment.

If the value is lower than -1, consider the logprobs failed.

compression_ratio class-attribute instance-attribute

compression_ratio: float | None = None

Compression ratio of the segment.

If the value is greater than 2.4, consider the compression failed.

end instance-attribute

end: float

End time of the segment in seconds.

id instance-attribute

id: int

Unique identifier of the segment.

no_speech_prob class-attribute instance-attribute

no_speech_prob: float | None = None

Probability of no speech in the segment.

If the value is higher than 1.0 and the avg_logprob is below -1, consider this segment silent.

seek instance-attribute

seek: int

Seek offset of the segment.

start instance-attribute

start: float

Start time of the segment in seconds.

temperature instance-attribute

temperature: float

Temperature parameter used for generating the segment.

text instance-attribute

text: str

Text content of the segment.

tokens instance-attribute

tokens: list[int]

Array of token IDs for the text content.
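
A hypothetical helper (not part of the module) that applies the quality heuristics described in the field docstrings above:

from vllm.entrypoints.openai.engine.protocol import TranslationSegment

def segment_looks_unreliable(segment: TranslationSegment) -> bool:
    # avg_logprob below -1 suggests the logprobs failed; a compression ratio
    # above 2.4 suggests degenerate, repetitive output.
    bad_logprob = segment.avg_logprob is not None and segment.avg_logprob < -1
    bad_compression = (
        segment.compression_ratio is not None and segment.compression_ratio > 2.4
    )
    return bad_logprob or bad_compression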

TranslationStreamResponse

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationStreamResponse(OpenAIBaseModel):
    id: str = Field(default_factory=lambda: f"trsl-{random_uuid()}")
    object: Literal["translation.chunk"] = "translation.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: list[TranslationResponseStreamChoice]
    usage: UsageInfo | None = Field(default=None)

choices instance-attribute

choices: list[TranslationResponseStreamChoice]

created class-attribute instance-attribute

created: int = Field(default_factory=lambda: int(time.time()))

id class-attribute instance-attribute

id: str = Field(
    default_factory=lambda: f"trsl-{random_uuid()}"
)

model instance-attribute

model: str

object class-attribute instance-attribute

object: Literal['translation.chunk'] = 'translation.chunk'

usage class-attribute instance-attribute

usage: UsageInfo | None = Field(default=None)
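
Illustrative chunk as emitted during streaming. This assumes DeltaMessage from the same module; the model name and delta text are placeholders:

from vllm.entrypoints.openai.engine.protocol import (
    DeltaMessage,
    TranslationResponseStreamChoice,
    TranslationStreamResponse,
)

chunk = TranslationStreamResponse(
    model="openai/whisper-large-v3",
    choices=[
        TranslationResponseStreamChoice(delta=DeltaMessage(content="Hello "))
    ],
)
# id, object, and created are filled in by their default factories.
print(chunk.model_dump_json(exclude_none=True))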

TranslationWord

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class TranslationWord(OpenAIBaseModel):
    end: float
    """End time of the word in seconds."""

    start: float
    """Start time of the word in seconds."""

    word: str
    """The text content of the word."""

end instance-attribute

end: float

End time of the word in seconds.

start instance-attribute

start: float

Start time of the word in seconds.

word instance-attribute

word: str

The text content of the word.

UsageInfo

Bases: OpenAIBaseModel

Source code in vllm/entrypoints/openai/engine/protocol.py
class UsageInfo(OpenAIBaseModel):
    prompt_tokens: int = 0
    total_tokens: int = 0
    completion_tokens: int | None = 0
    prompt_tokens_details: PromptTokenUsageInfo | None = None

completion_tokens class-attribute instance-attribute

completion_tokens: int | None = 0

prompt_tokens class-attribute instance-attribute

prompt_tokens: int = 0

prompt_tokens_details class-attribute instance-attribute

prompt_tokens_details: PromptTokenUsageInfo | None = None

total_tokens class-attribute instance-attribute

total_tokens: int = 0
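
Minimal sketch of the usage payload attached to a final or streamed response; the counts are illustrative:

from vllm.entrypoints.openai.engine.protocol import UsageInfo

usage = UsageInfo(
    prompt_tokens=120,
    completion_tokens=34,
    total_tokens=154,  # prompt_tokens + completion_tokens
)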

get_logits_processors

get_logits_processors(
    processors: LogitsProcessors | None, pattern: str | None
) -> list[Any] | None
Source code in vllm/entrypoints/openai/engine/protocol.py
def get_logits_processors(
    processors: LogitsProcessors | None, pattern: str | None
) -> list[Any] | None:
    if processors and pattern:
        logits_processors = []
        for processor in processors:
            qualname = processor if isinstance(processor, str) else processor.qualname
            if not re.match(pattern, qualname):
                raise ValueError(
                    f"Logits processor '{qualname}' is not allowed by this "
                    "server. See --logits-processor-pattern engine argument "
                    "for more information."
                )
            try:
                logits_processor = resolve_obj_by_qualname(qualname)
            except Exception as e:
                raise ValueError(
                    f"Logits processor '{qualname}' could not be resolved: {e}"
                ) from e
            if isinstance(processor, LogitsProcessorConstructor):
                logits_processor = logits_processor(
                    *processor.args or [], **processor.kwargs or {}
                )
            logits_processors.append(logits_processor)
        return logits_processors
    elif processors:
        raise ValueError(
            "The `logits_processors` argument is not supported by this "
            "server. See --logits-processor-pattern engine argument "
            "for more information."
        )
    return None
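
Sketch of the allow-list behavior. "my_pkg.processors.BanWords" is a hypothetical qualified name; the accepting path would additionally require that object to be importable on the server:

from vllm.entrypoints.openai.engine.protocol import get_logits_processors

# No --logits-processor-pattern configured: request-supplied processors are rejected.
try:
    get_logits_processors(["my_pkg.processors.BanWords"], pattern=None)
except ValueError as exc:
    print(exc)

# Pattern configured, but the qualified name does not match: rejected before resolution.
try:
    get_logits_processors(["my_pkg.processors.BanWords"], pattern=r"allowed_pkg\..*")
except ValueError as exc:
    print(exc)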

serialize_message

serialize_message(msg)

Serializes a single message

Source code in vllm/entrypoints/openai/engine/protocol.py
def serialize_message(msg):
    """
    Serializes a single message
    """
    if isinstance(msg, dict):
        return msg
    elif hasattr(msg, "to_dict"):
        return msg.to_dict()
    else:
        # fallback to pydantic JSON dump
        return msg.model_dump_json()

serialize_messages

serialize_messages(msgs)

Serializes multiple messages

Source code in vllm/entrypoints/openai/engine/protocol.py
def serialize_messages(msgs):
    """
    Serializes multiple messages
    """
    return [serialize_message(msg) for msg in msgs] if msgs else None
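
Minimal sketch: dicts pass through unchanged, while pydantic models without a to_dict method fall back to model_dump_json() (note the fallback yields a JSON string rather than a dict):

from vllm.entrypoints.openai.engine.protocol import (
    TranslationResponse,
    serialize_messages,
)

messages = [
    {"role": "user", "content": "Bonjour"},
    TranslationResponse(text="Hello"),
]
print(serialize_messages(messages))
# Roughly: [{'role': 'user', 'content': 'Bonjour'}, '{"text":"Hello"}']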