vllm.multimodal.profiling

_I module-attribute

_I = TypeVar('_I', bound=BaseProcessingInfo)

logger module-attribute

logger = init_logger(__name__)

BaseDummyInputsBuilder

Bases: ABC, Generic[_I]

Abstract base class that constructs the dummy data to profile multi-modal models.

Source code in vllm/multimodal/profiling.py
class BaseDummyInputsBuilder(ABC, Generic[_I]):
    """
    Abstract base class that constructs the dummy data to profile
    multi-modal models.
    """

    def __init__(self, info: _I) -> None:
        super().__init__()

        self.info = info

    @abstractmethod
    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        """
        Build the text input corresponding to `mm_counts`.
        """
        raise NotImplementedError

    @abstractmethod
    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> MultiModalDataDict:
        """
        Build the multimodal input which, after processing, results in
        the maximum possible number of placeholder tokens.

        Args:
            seq_len: Sequence length
            mm_counts: Count of items per modality
            mm_options: Configurable options per modality (optional).
                       If None, use model defaults for backward compatibility.
                       If provided, models can use these to customize dummy
                       data generation.
        """
        raise NotImplementedError

    def get_dummy_processor_inputs(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> ProcessorInputs:
        """
        Build the input which, after processing, results in
        the maximum possible number of placeholder tokens.

        Args:
            seq_len: Sequence length
            mm_counts: Count of items per modality
            mm_options: Configurable options per modality (optional)
        """
        dummy_text = self.get_dummy_text(mm_counts)

        # Use the unified function for both legacy and configurable cases
        dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts, mm_options)

        tokenization_kwargs = {"truncation": False}

        return ProcessorInputs(
            prompt=dummy_text,
            mm_data=dummy_mm_data,
            tokenization_kwargs=tokenization_kwargs,
        )

    def _get_dummy_audios(
        self,
        *,
        length: int,
        num_audios: int,
        overrides: AudioDummyOptions | None = None,
    ) -> list[npt.NDArray]:
        if num_audios == 0:
            return []
        if overrides and overrides.length:
            if overrides.length > length:
                logger.warning(
                    "audio.length override (%d) exceeds model's "
                    "maximum length (%d), will be ignored",
                    overrides.length,
                    length,
                )
            length = min(length, overrides.length)
        audio = np.zeros((length,))
        return [audio] * num_audios

    def _get_dummy_images(
        self,
        *,
        width: int,
        height: int,
        num_images: int,
        overrides: ImageDummyOptions | None = None,
    ) -> list[Image.Image]:
        if num_images == 0:
            return []
        if overrides:
            if overrides.width:
                if overrides.width > width:
                    logger.warning(
                        "image.width override (%d) exceeds model's "
                        "maximum width (%d), will be ignored",
                        overrides.width,
                        width,
                    )
                width = min(width, overrides.width)
            if overrides.height:
                if overrides.height > height:
                    logger.warning(
                        "image.height override (%d) exceeds model's "
                        "maximum height (%d), will be ignored",
                        overrides.height,
                        height,
                    )
                height = min(height, overrides.height)
        image = Image.new("RGB", (width, height), color=255)
        return [image] * num_images

    def _get_dummy_videos(
        self,
        *,
        width: int,
        height: int,
        num_frames: int,
        num_videos: int,
        overrides: VideoDummyOptions | None = None,
    ) -> list[npt.NDArray]:
        if num_videos == 0:
            return []
        if overrides:
            if overrides.num_frames:
                if overrides.num_frames > num_frames:
                    logger.warning(
                        "video.num_frames override (%d) exceeds model's "
                        "maximum number of frames (%d), will be ignored",
                        overrides.num_frames,
                        num_frames,
                    )
                num_frames = min(num_frames, overrides.num_frames)
            if overrides.width:
                if overrides.width > width:
                    logger.warning(
                        "video.width override (%d) exceeds model's "
                        "maximum width (%d), will be ignored",
                        overrides.width,
                        width,
                    )
                width = min(width, overrides.width)
            if overrides.height:
                if overrides.height > height:
                    logger.warning(
                        "video.height override (%d) exceeds model's "
                        "maximum height (%d), will be ignored",
                        overrides.height,
                        height,
                    )
                height = min(height, overrides.height)
        video = np.full((num_frames, width, height, 3), 255, dtype=np.uint8)
        return [video] * num_videos

    def get_dummy_mm_inputs(
        self,
        processor: BaseMultiModalProcessor[_I],
        seq_len: int,
        mm_counts: Mapping[str, int] | None = None,
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> MultiModalInputs:
        if mm_counts is None:
            mm_counts = processor.allowed_mm_limits

        processor_inputs = self.get_dummy_processor_inputs(
            seq_len,
            mm_counts=mm_counts,
            mm_options=mm_options,
        )

        return processor.apply(
            prompt=processor_inputs.prompt,
            mm_data=processor_inputs.mm_data,
            hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs,
            tokenization_kwargs=processor_inputs.tokenization_kwargs,
        )

    def get_decoder_dummy_data(
        self,
        processor: BaseMultiModalProcessor[_I],
        seq_len: int,
        mm_counts: Mapping[str, int] | None = None,
        mm_options: Mapping[str, BaseDummyOptions] | None = None,
    ) -> DummyDecoderData:
        mm_inputs = self.get_dummy_mm_inputs(
            processor,
            seq_len,
            mm_counts=mm_counts,
            mm_options=mm_options,
        )

        prompt_token_ids = mm_inputs["prompt_token_ids"]
        total_len = len(prompt_token_ids)

        if total_len < seq_len:
            prompt_token_ids.extend([0] * (seq_len - total_len))

        return DummyDecoderData(
            prompt_token_ids=prompt_token_ids,
            multi_modal_data=mm_inputs["mm_kwargs"].require_data(),
            multi_modal_placeholders=mm_inputs["mm_placeholders"],
        )
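
A minimal sketch of how a model would typically subclass this builder: get_dummy_text emits one placeholder per requested item, and get_dummy_mm_data reuses the _get_dummy_* helpers below to produce worst-case inputs. The MyProcessingInfo type, the "<image>" placeholder text, and the 336x336 maximum resolution are illustrative assumptions, not taken from any real vLLM model.

# Sketch only: names marked below are assumptions for illustration.
from collections.abc import Mapping

from vllm.multimodal.profiling import BaseDummyInputsBuilder


class MyDummyInputsBuilder(BaseDummyInputsBuilder["MyProcessingInfo"]):
    def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
        # One "<image>" placeholder (hypothetical token) per requested image.
        return "<image>" * mm_counts.get("image", 0)

    def get_dummy_mm_data(
        self,
        seq_len: int,
        mm_counts: Mapping[str, int],
        mm_options=None,
    ):
        # Returns a MultiModalDataDict: modality name -> list of dummy items.
        image_overrides = mm_options.get("image") if mm_options else None

        # Reuse the base-class helper to build worst-case (maximum-size)
        # images, optionally clamped by user-supplied overrides.
        return {
            "image": self._get_dummy_images(
                width=336,   # assumed maximum resolution for illustration
                height=336,
                num_images=mm_counts.get("image", 0),
                overrides=image_overrides,
            ),
        }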

info instance-attribute

info = info

__init__

__init__(info: _I) -> None
Source code in vllm/multimodal/profiling.py
def __init__(self, info: _I) -> None:
    super().__init__()

    self.info = info

_get_dummy_audios

_get_dummy_audios(
    *,
    length: int,
    num_audios: int,
    overrides: AudioDummyOptions | None = None,
) -> list[NDArray]
Source code in vllm/multimodal/profiling.py
def _get_dummy_audios(
    self,
    *,
    length: int,
    num_audios: int,
    overrides: AudioDummyOptions | None = None,
) -> list[npt.NDArray]:
    if num_audios == 0:
        return []
    if overrides and overrides.length:
        if overrides.length > length:
            logger.warning(
                "audio.length override (%d) exceeds model's "
                "maximum length (%d), will be ignored",
                overrides.length,
                length,
            )
        length = min(length, overrides.length)
    audio = np.zeros((length,))
    return [audio] * num_audios

_get_dummy_images

_get_dummy_images(
    *,
    width: int,
    height: int,
    num_images: int,
    overrides: ImageDummyOptions | None = None,
) -> list[Image]
Source code in vllm/multimodal/profiling.py
def _get_dummy_images(
    self,
    *,
    width: int,
    height: int,
    num_images: int,
    overrides: ImageDummyOptions | None = None,
) -> list[Image.Image]:
    if num_images == 0:
        return []
    if overrides:
        if overrides.width:
            if overrides.width > width:
                logger.warning(
                    "image.width override (%d) exceeds model's "
                    "maximum width (%d), will be ignored",
                    overrides.width,
                    width,
                )
            width = min(width, overrides.width)
        if overrides.height:
            if overrides.height > height:
                logger.warning(
                    "image.height override (%d) exceeds model's "
                    "maximum height (%d), will be ignored",
                    overrides.height,
                    height,
                )
            height = min(height, overrides.height)
    image = Image.new("RGB", (width, height), color=255)
    return [image] * num_images

_get_dummy_videos

_get_dummy_videos(
    *,
    width: int,
    height: int,
    num_frames: int,
    num_videos: int,
    overrides: VideoDummyOptions | None = None,
) -> list[NDArray]
Source code in vllm/multimodal/profiling.py
def _get_dummy_videos(
    self,
    *,
    width: int,
    height: int,
    num_frames: int,
    num_videos: int,
    overrides: VideoDummyOptions | None = None,
) -> list[npt.NDArray]:
    if num_videos == 0:
        return []
    if overrides:
        if overrides.num_frames:
            if overrides.num_frames > num_frames:
                logger.warning(
                    "video.num_frames override (%d) exceeds model's "
                    "maximum number of frames (%d), will be ignored",
                    overrides.num_frames,
                    num_frames,
                )
            num_frames = min(num_frames, overrides.num_frames)
        if overrides.width:
            if overrides.width > width:
                logger.warning(
                    "video.width override (%d) exceeds model's "
                    "maximum width (%d), will be ignored",
                    overrides.width,
                    width,
                )
            width = min(width, overrides.width)
        if overrides.height:
            if overrides.height > height:
                logger.warning(
                    "video.height override (%d) exceeds model's "
                    "maximum height (%d), will be ignored",
                    overrides.height,
                    height,
                )
            height = min(height, overrides.height)
    video = np.full((num_frames, width, height, 3), 255, dtype=np.uint8)
    return [video] * num_videos

get_decoder_dummy_data

get_decoder_dummy_data(
    processor: BaseMultiModalProcessor[_I],
    seq_len: int,
    mm_counts: Mapping[str, int] | None = None,
    mm_options: Mapping[str, BaseDummyOptions]
    | None = None,
) -> DummyDecoderData
Source code in vllm/multimodal/profiling.py
def get_decoder_dummy_data(
    self,
    processor: BaseMultiModalProcessor[_I],
    seq_len: int,
    mm_counts: Mapping[str, int] | None = None,
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> DummyDecoderData:
    mm_inputs = self.get_dummy_mm_inputs(
        processor,
        seq_len,
        mm_counts=mm_counts,
        mm_options=mm_options,
    )

    prompt_token_ids = mm_inputs["prompt_token_ids"]
    total_len = len(prompt_token_ids)

    if total_len < seq_len:
        prompt_token_ids.extend([0] * (seq_len - total_len))

    return DummyDecoderData(
        prompt_token_ids=prompt_token_ids,
        multi_modal_data=mm_inputs["mm_kwargs"].require_data(),
        multi_modal_placeholders=mm_inputs["mm_placeholders"],
    )

get_dummy_mm_data abstractmethod

get_dummy_mm_data(
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions]
    | None = None,
) -> MultiModalDataDict

Build the multimodal input which, after processing, results in the maximum possible number of placeholder tokens.

Parameters:

    seq_len (int): Sequence length. Required.
    mm_counts (Mapping[str, int]): Count of items per modality. Required.
    mm_options (Mapping[str, BaseDummyOptions] | None): Configurable options per
        modality (optional). If None, use model defaults for backward
        compatibility. If provided, models can use these to customize dummy
        data generation. Defaults to None.
Source code in vllm/multimodal/profiling.py
@abstractmethod
def get_dummy_mm_data(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> MultiModalDataDict:
    """
    Build the multimodal input which, after processing, results in
    the maximum possible number of placeholder tokens.

    Args:
        seq_len: Sequence length
        mm_counts: Count of items per modality
        mm_options: Configurable options per modality (optional).
                   If None, use model defaults for backward compatibility.
                   If provided, models can use these to customize dummy
                   data generation.
    """
    raise NotImplementedError
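
As a caller-side sketch, mm_options is simply a mapping from modality name to its dummy-option object, used here to shrink the dummy images generated during profiling. The ImageDummyOptions import path and its width/height keyword arguments are assumptions inferred from the override handling in _get_dummy_images, and builder stands for any concrete subclass.

# Caller-side sketch: per-modality overrides for dummy data generation.
# Import path and constructor keywords of ImageDummyOptions are assumptions;
# `builder` is any concrete BaseDummyInputsBuilder subclass.
from vllm.multimodal.profiling import ImageDummyOptions  # path is an assumption

mm_counts = {"image": 2}
mm_options = {"image": ImageDummyOptions(width=224, height=224)}

dummy_data = builder.get_dummy_mm_data(
    seq_len=4096,
    mm_counts=mm_counts,
    mm_options=mm_options,
)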

get_dummy_mm_inputs

get_dummy_mm_inputs(
    processor: BaseMultiModalProcessor[_I],
    seq_len: int,
    mm_counts: Mapping[str, int] | None = None,
    mm_options: Mapping[str, BaseDummyOptions]
    | None = None,
) -> MultiModalInputs
Source code in vllm/multimodal/profiling.py
def get_dummy_mm_inputs(
    self,
    processor: BaseMultiModalProcessor[_I],
    seq_len: int,
    mm_counts: Mapping[str, int] | None = None,
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> MultiModalInputs:
    if mm_counts is None:
        mm_counts = processor.allowed_mm_limits

    processor_inputs = self.get_dummy_processor_inputs(
        seq_len,
        mm_counts=mm_counts,
        mm_options=mm_options,
    )

    return processor.apply(
        prompt=processor_inputs.prompt,
        mm_data=processor_inputs.mm_data,
        hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs,
        tokenization_kwargs=processor_inputs.tokenization_kwargs,
    )

get_dummy_processor_inputs

get_dummy_processor_inputs(
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions]
    | None = None,
) -> ProcessorInputs

Build the input which, after processing, results in the maximum possible number of placeholder tokens.

Parameters:

    seq_len (int): Sequence length. Required.
    mm_counts (Mapping[str, int]): Count of items per modality. Required.
    mm_options (Mapping[str, BaseDummyOptions] | None): Configurable options per
        modality (optional). Defaults to None.
Source code in vllm/multimodal/profiling.py
def get_dummy_processor_inputs(
    self,
    seq_len: int,
    mm_counts: Mapping[str, int],
    mm_options: Mapping[str, BaseDummyOptions] | None = None,
) -> ProcessorInputs:
    """
    Build the input which, after processing, results in
    the maximum possible number of placeholder tokens.

    Args:
        seq_len: Sequence length
        mm_counts: Count of items per modality
        mm_options: Configurable options per modality (optional)
    """
    dummy_text = self.get_dummy_text(mm_counts)

    # Use the unified function for both legacy and configurable cases
    dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts, mm_options)

    tokenization_kwargs = {"truncation": False}

    return ProcessorInputs(
        prompt=dummy_text,
        mm_data=dummy_mm_data,
        tokenization_kwargs=tokenization_kwargs,
    )

get_dummy_text abstractmethod

get_dummy_text(mm_counts: Mapping[str, int]) -> str

Build the text input corresponding to mm_counts.

Source code in vllm/multimodal/profiling.py
@abstractmethod
def get_dummy_text(self, mm_counts: Mapping[str, int]) -> str:
    """
    Build the text input corresponding to `mm_counts`.
    """
    raise NotImplementedError

DummyDecoderData

Bases: NamedTuple

Dummy data used for profiling.

Source code in vllm/multimodal/profiling.py
class DummyDecoderData(NamedTuple):
    """Dummy data used for profiling."""

    prompt_token_ids: list[int]
    multi_modal_data: MultiModalKwargsItems
    multi_modal_placeholders: MultiModalPlaceholderDict

multi_modal_data instance-attribute

multi_modal_data: MultiModalKwargsItems

multi_modal_placeholders instance-attribute

multi_modal_placeholders: MultiModalPlaceholderDict

prompt_token_ids instance-attribute

prompt_token_ids: list[int]

DummyEncoderData

Bases: NamedTuple

Dummy data used for profiling.

Source code in vllm/multimodal/profiling.py
class DummyEncoderData(NamedTuple):
    """Dummy data used for profiling."""

    prompt_token_ids: list[int]

prompt_token_ids instance-attribute

prompt_token_ids: list[int]

ProcessorInputs dataclass

Represents the keyword arguments to vllm.multimodal.processing.BaseMultiModalProcessor.apply.

Source code in vllm/multimodal/profiling.py
@dataclass
class ProcessorInputs:
    """
    Represents the keyword arguments to
    [`vllm.multimodal.processing.BaseMultiModalProcessor.apply`][].
    """

    prompt: str | list[int]
    mm_data: MultiModalDataDict
    hf_processor_mm_kwargs: Mapping[str, object] = field(default_factory=dict)
    tokenization_kwargs: Mapping[str, object] = field(default_factory=dict)

hf_processor_mm_kwargs class-attribute instance-attribute

hf_processor_mm_kwargs: Mapping[str, object] = field(
    default_factory=dict
)

mm_data instance-attribute

prompt instance-attribute

prompt: str | list[int]

tokenization_kwargs class-attribute instance-attribute

tokenization_kwargs: Mapping[str, object] = field(
    default_factory=dict
)

__init__

__init__(
    prompt: str | list[int],
    mm_data: MultiModalDataDict,
    hf_processor_mm_kwargs: Mapping[str, object] = dict(),
    tokenization_kwargs: Mapping[str, object] = dict(),
) -> None
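
A minimal usage sketch, mirroring get_dummy_mm_inputs above: the fields of ProcessorInputs map one-to-one onto the keyword arguments of BaseMultiModalProcessor.apply. The processor object and the dummy_image value are placeholders for illustration.

from vllm.multimodal.profiling import ProcessorInputs

# `processor` and `dummy_image` are placeholders for illustration.
inputs = ProcessorInputs(
    prompt="<image> Describe the picture.",
    mm_data={"image": [dummy_image]},
    tokenization_kwargs={"truncation": False},
)

mm_inputs = processor.apply(
    prompt=inputs.prompt,
    mm_data=inputs.mm_data,
    hf_processor_mm_kwargs=inputs.hf_processor_mm_kwargs,
    tokenization_kwargs=inputs.tokenization_kwargs,
)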