
vllm.model_executor.models.minicpmv

Inference-only MiniCPM-V model compatible with HuggingFace weights.

MiniCPMV

Bases: MiniCPMVBaseModel, SupportsMultiModal, SupportsLoRA

Different versions of MiniCPMV use different visual encoders and LLMs, which is not conducive to the current integration logic of LoRA and bitsandbytes in vLLM. Therefore, it is necessary to separate them.

Source code in vllm/model_executor/models/minicpmv.py
@MULTIMODAL_REGISTRY.register_processor(
    MiniCPMVMultiModalProcessor,
    info=MiniCPMVProcessingInfo,
    dummy_inputs=MiniCPMVDummyInputsBuilder,
)
class MiniCPMV(MiniCPMVBaseModel, SupportsMultiModal, SupportsLoRA):
    """
    Different versions of MiniCPMV use different visual encoders and LLMs,
    which is not conducive to the current integration logic of LoRA and
    bitsandbytes in vLLM. Therefore, it is necessary to separate them.
    """

    def __new__(cls, *, vllm_config: VllmConfig, prefix: str = ""):
        config = vllm_config.model_config.hf_config
        if not hasattr(config, "version"):
            if config.hidden_size == 2304 and config.query_num == 64:
                version = (2, 0)
            else:
                version = (2, 5)
        else:
            version = str(config.version).split(".")
            version = tuple([int(x) for x in version])
        # Dispatch class based on version
        instance_cls = _SUPPORT_VERSION.get(version)
        if instance_cls is None:
            supported_versions = ", ".join(
                [f"{v[0]}.{v[1]}" for v in sorted(_SUPPORT_VERSION.keys())]
            )
            raise ValueError(
                f"Currently, MiniCPMV only supports versions "
                f"{supported_versions}. Got version: {version}"
            )

        # quant_config references base class members,
        # so update values before init is called
        cls.packed_modules_mapping.update(instance_cls.packed_modules_mapping)
        cls.embedding_modules.update(instance_cls.embedding_modules)
        return instance_cls(vllm_config=vllm_config, prefix=prefix)
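
For context, the version string from the HF config is parsed into an integer tuple before dispatch. Below is a minimal, standalone sketch of that parsing step; the _SUPPORT_VERSION table shown here is purely illustrative, the real table lives in vllm/model_executor/models/minicpmv.py.

# Illustrative version table (not the real one from the module).
_SUPPORT_VERSION = {(2, 0): "MiniCPMV2_0", (2, 5): "MiniCPMV2_5", (2, 6): "MiniCPMV2_6"}

def parse_version(version_str: str) -> tuple[int, ...]:
    # "2.6" -> (2, 6), mirroring str(config.version).split(".") in __new__
    return tuple(int(x) for x in version_str.split("."))

assert parse_version("2.6") == (2, 6)
assert _SUPPORT_VERSION.get(parse_version("3.0")) is None  # unsupported -> ValueError in __new__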

MiniCPMVBaseModel

Bases: Module, SupportsMultiModal, SupportsPP

Abstract base class for MiniCPM-V models: it can only be inherited from, not instantiated directly.

Source code in vllm/model_executor/models/minicpmv.py
class MiniCPMVBaseModel(nn.Module, SupportsMultiModal, SupportsPP):
    """
    Abstract base class for MiniCPM-V models: it can only be inherited from,
    not instantiated directly.
    """

    supports_encoder_tp_data = True

    @classmethod
    def get_placeholder_str(cls, modality: str, i: int) -> str | None:
        if modality.startswith("image"):
            return "(<image>./</image>)"
        if modality.startswith("video"):
            return "(<video>./</video>)"

        raise ValueError("Only image or video modality is supported")

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        config = vllm_config.model_config.hf_config
        multimodal_config = vllm_config.model_config.multimodal_config
        quant_config = vllm_config.quant_config
        self.use_data_parallel = multimodal_config.mm_encoder_tp_mode == "data"
        super().__init__()
        # All MiniCPM-V models disable `tie_word_embeddings` but
        # `PretrainedConfig.tie_word_embeddings` defaults to True; we cannot
        # check `tie_word_embeddings` until vLLM integrates the MiniCPM-V
        # model and config class.
        self.config = config
        self.multimodal_config = multimodal_config

        self.version = get_version_by_config(self.config)

        with self._mark_language_model(vllm_config):
            self.llm = self.init_llm(
                vllm_config=vllm_config, prefix=maybe_prefix(prefix, "llm")
            )

        with self._mark_tower_model(vllm_config, {"image", "video"}):
            self.vpm = self.init_vision_module(
                config, quant_config, prefix=maybe_prefix(prefix, "vpm")
            )
            self.vision_dim = (
                self.vpm.embed_dim
                if self.version == (2, 0)
                else self.vpm.embeddings.embed_dim
            )
            self.embed_dim = self.config.hidden_size

            self.resampler = self.init_resampler(
                self.embed_dim,
                self.vision_dim,
                quant_config=quant_config,
                prefix=maybe_prefix(prefix, "resampler"),
            )
            self._resampler_moved = False

        self.make_empty_intermediate_tensors = self.llm.make_empty_intermediate_tensors

    def _ensure_resampler_device(self) -> None:
        if self._resampler_moved:
            return
        # Only move device, DO NOT touch dtype (fp8 quant needs its own dtype)
        self.resampler.to(current_platform.device_type)
        self._resampler_moved = True

    def _parse_and_validate_vision_input(
        self,
        modality: str,
        **kwargs: object,
    ) -> MiniCPMVImageInputs | None:
        pixel_values = kwargs.pop("pixel_values", None)
        image_embeds = kwargs.pop("image_embeds", None)
        temporal_ids = kwargs.pop("temporal_ids", None)

        if pixel_values is None and image_embeds is None:
            return None

        if image_embeds is not None:
            return MiniCPMVImageEmbeddingInputs(
                type="image_embeds",
                image_embeds=image_embeds,
            )

        tgt_sizes = kwargs.pop("tgt_sizes")

        num_slices_flat = torch.tensor([len(ps) for ps in pixel_values])
        pixel_values_flat = flatten_bn(pixel_values)
        tgt_sizes_flat = flatten_bn(tgt_sizes, concat=True)

        return MiniCPMVImagePixelInputs(
            type="pixel_values",
            pixel_values=pixel_values_flat,
            tgt_sizes=tgt_sizes_flat,
            num_slices=num_slices_flat,
            temporal_ids=temporal_ids,
        )

    def _parse_and_validate_multimodal_inputs(self, **kwargs: object) -> dict:
        modalities = {}

        # Preserve the order of modalities if there are multiple of them
        # from the order of kwargs.
        for input_key in kwargs:
            if (
                input_key in ("pixel_values", "image_embeds")
                and "images" not in modalities
            ):
                modalities["images"] = self._parse_and_validate_vision_input(
                    "images", **kwargs
                )
            if (
                input_key in ("video_pixel_values", "video_embeds")
                and "videos" not in modalities
            ):
                modalities["videos"] = self._parse_and_validate_vision_input(
                    "videos", **{k.removeprefix("video_"): v for k, v in kwargs.items()}
                )

        return modalities

    def _process_vision_input(
        self,
        image_input: MiniCPMVImageInputs,
    ) -> torch.Tensor | list[torch.Tensor] | tuple[torch.Tensor, ...]:
        if image_input["type"] == "image_embeds":
            return image_input["image_embeds"]

        image_features_flat = self.get_vision_hidden_states(image_input)

        num_slices = image_input["num_slices"]
        return [e.flatten(0, 1) for e in image_features_flat.split(num_slices.tolist())]

    def _process_multimodal_inputs(self, modalities: dict):
        # The result multimodal_embeddings is tuple of tensors, with each
        # tensor corresponding to a multimodal data item (image or video).
        multimodal_embeddings: tuple[torch.Tensor, ...] = ()

        # NOTE: It is important to iterate over the keys in this dictionary
        # to preserve the order of the modalities.
        for modality in modalities:
            if modality == "images":
                image_input = modalities["images"]
                image_embeddings = self._process_vision_input(image_input)
                multimodal_embeddings += tuple(image_embeddings)
            if modality == "videos":
                video_input = modalities["videos"]
                video_embeddings = self._process_vision_input(video_input)
                multimodal_embeddings += tuple(video_embeddings)

        return multimodal_embeddings

    def embed_multimodal(self, **kwargs: object) -> MultiModalEmbeddings:
        modalities = self._parse_and_validate_multimodal_inputs(**kwargs)
        if not modalities:
            return []

        return self._process_multimodal_inputs(modalities)

    def forward(
        self,
        input_ids: torch.Tensor | None,
        positions: torch.Tensor,
        intermediate_tensors: IntermediateTensors | None = None,
        inputs_embeds: torch.Tensor | None = None,
        **kwargs: Any,
    ) -> torch.Tensor:
        if intermediate_tensors is not None:
            inputs_embeds = None

        hidden_states = self.llm.model(
            input_ids=input_ids,
            positions=positions,
            intermediate_tensors=intermediate_tensors,
            inputs_embeds=inputs_embeds,
        )
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor | None:
        return self.llm.compute_logits(hidden_states)

    def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]:
        loader = AutoWeightsLoader(self)
        loaded = loader.load_weights(weights)
        self._ensure_resampler_device()
        return loaded

    def get_mm_mapping(self) -> MultiModelKeys:
        """
        Get the module prefix in multimodal models
        """
        return MultiModelKeys.from_string_field(
            language_model="llm", connector="resampler", tower_model="vpm"
        )

    def init_llm(
        self,
        vllm_config: VllmConfig,
        prefix: str = "",
    ) -> nn.Module:
        raise NotImplementedError

    def init_vision_module(
        self,
        config: PretrainedConfig,
        quant_config: QuantizationConfig | None,
        prefix: str = "",
    ) -> nn.Module:
        raise NotImplementedError

    def init_resampler(
        self,
        embed_dim: int,
        vision_dim: int,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> nn.Module:
        raise NotImplementedError

    def get_vision_hidden_states(self, data: MiniCPMVImagePixelInputs) -> torch.Tensor:
        raise NotImplementedError
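
As a small illustration of the placeholder convention returned by get_placeholder_str (a sketch of prompt text only, not how vLLM assembles prompts internally):

image_ph = "(<image>./</image>)"   # get_placeholder_str("image", i)
video_ph = "(<video>./</video>)"   # get_placeholder_str("video", i)
prompt = f"{image_ph}\n{video_ph}\nDescribe the image and the video."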

get_mm_mapping

get_mm_mapping() -> MultiModelKeys

Get the module prefix in multimodal models

Source code in vllm/model_executor/models/minicpmv.py
def get_mm_mapping(self) -> MultiModelKeys:
    """
    Get the module prefix in multimodal models
    """
    return MultiModelKeys.from_string_field(
        language_model="llm", connector="resampler", tower_model="vpm"
    )
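
One hypothetical use of these prefixes is to group parameter names by component, e.g. when applying LoRA only to the language model; the parameter names below are made up for illustration.

prefixes = {"language_model": "llm", "connector": "resampler", "tower_model": "vpm"}
param_names = [
    "llm.model.layers.0.self_attn.q_proj.weight",   # made-up names
    "vpm.encoder.layers.0.mlp.fc1.weight",
    "resampler.kv_proj.weight",
]
by_component = {
    comp: [n for n in param_names if n.startswith(prefix + ".")]
    for comp, prefix in prefixes.items()
}
assert by_component["language_model"] == param_names[:1]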

MiniCPMVImageEmbeddingInputs

Bases: TensorSchema

Dimensions
  • bn: Batch size * number of images
  • ns: Number of slices
  • hs: Hidden size (must match language model backbone)
Source code in vllm/model_executor/models/minicpmv.py
class MiniCPMVImageEmbeddingInputs(TensorSchema):
    """
    Dimensions:
        - bn: Batch size * number of images
        - ns: Number of slices
        - hs: Hidden size (must match language model backbone)
    """

    type: Literal["image_embeds"]
    image_embeds: Annotated[
        torch.Tensor | list[torch.Tensor],
        TensorShape("bn", "ns", "hs", dynamic_dims={"ns"}),
    ]

MiniCPMVImagePixelInputs

Bases: TensorSchema

Dimensions
  • bns: Batch size * number of images * number of slices
  • bn: Batch size * number of images
  • c: Number of channels
  • h: Height
  • w: Width
Source code in vllm/model_executor/models/minicpmv.py
class MiniCPMVImagePixelInputs(TensorSchema):
    """
    Dimensions:
        - bns: Batch size * number of images * number of slices
        - bn: Batch size * number of images
        - c: Number of channels
        - h: Height
        - w: Width
    """

    type: Literal["pixel_values"] = "pixel_values"

    # Note that the patch size may vary, so we pass it as a list instead of a
    # batched tensor.
    pixel_values: Annotated[
        list[torch.Tensor],
        TensorShape("bns", "c", "h", "w", dynamic_dims={"h", "w"}),
    ]
    tgt_sizes: Annotated[
        torch.Tensor,
        TensorShape("bns", 2),  # This should be in `(height, width)` format.
    ]
    num_slices: Annotated[
        torch.Tensor,
        TensorShape("bn"),
    ]

    # Handled as batched input but shape check via TensorShape
    # isn't strictly necessary since it defaults to None
    # and has a non-tensor type.
    temporal_ids: list[list[int]] | None = None
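
A shape-only illustration of these fields (all values made up, assuming a 14-pixel patch): two images, the first split into 3 slices and the second into 1, where each tgt_sizes row gives the (height, width) of a slice in patches.

import torch

pixel_values = [torch.rand(3, 448, 560) for _ in range(4)]  # bns = 3 + 1 ragged slices
tgt_sizes = torch.tensor([[32, 40]] * 4)                    # (bns, 2), (height, width) in patches
num_slices = torch.tensor([3, 1])                           # (bn,) slices per image
assert int(num_slices.sum()) == len(pixel_values) == tgt_sizes.shape[0]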

Resampler4_5

Bases: Resampler2_5

Source code in vllm/model_executor/models/minicpmv.py
class Resampler4_5(Resampler2_5):
    def __init__(
        self,
        num_queries: int,
        embed_dim: int,
        num_heads: int,
        kv_dim: int | None = None,
        norm_layer: Callable[[int], nn.LayerNorm] = DEFAULT_LN,
        max_size: tuple[int, int] = (70, 70),
        max_temporal_size: int = 36000,
        quant_config: QuantizationConfig | None = None,
        prefix: str = "",
    ) -> None:
        super().__init__(
            num_queries,
            embed_dim,
            num_heads,
            kv_dim,
            norm_layer,
            max_size,
            quant_config=quant_config,
            prefix=prefix,
        )

        trunc_normal_(self.query, std=0.02)
        self.max_temporal_size = max_temporal_size
        self._set_temporal_pos_cache(self.max_temporal_size)
        self.apply(self._init_weights)

    def get_1d_sincos_pos_embed_from_temporal_size(
        self, embed_dim: int, pos: np.ndarray
    ):
        """
        embed_dim: output dimension for each position
        pos: a list of positions to be encoded: size (M,)
        out: (M, D)
        """
        assert embed_dim % 2 == 0
        omega = np.arange(embed_dim // 2, dtype=np.float32)
        omega /= embed_dim / 2.0
        omega = 1.0 / 10000**omega  # (D/2,)

        pos = pos.reshape(-1)  # (M,)
        out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

        emb_sin = np.sin(out)  # (M, D/2)
        emb_cos = np.cos(out)  # (M, D/2)

        emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
        return emb

    def _set_temporal_pos_cache(
        self, max_temporal_size: int, device: torch.types.Device = "cpu"
    ) -> None:
        temporal_size = np.arange(max_temporal_size, dtype=np.float32)
        pos_embed = (
            torch.from_numpy(
                self.get_1d_sincos_pos_embed_from_temporal_size(
                    self.embed_dim, temporal_size
                )
            )
            .float()
            .to(device)
        )
        self.register_buffer("temporal_pos_embed", pos_embed, persistent=False)

    def _adjust_temporal_pos_cache(
        self, max_temporal_size: int, device: torch.types.Device = "cpu"
    ):
        if max_temporal_size > self.max_temporal_size:
            self.max_temporal_size = max_temporal_size
            self._set_temporal_pos_cache(self.max_temporal_size, device)

    def _init_weights(self, m: nn.Linear | nn.LayerNorm):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=0.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def forward(
        self,
        x: torch.Tensor,
        tgt_sizes: torch.Tensor,
        # temporal_ids for high refresh rate videos
        temporal_ids=None,
    ) -> torch.Tensor:
        assert x.shape[0] == tgt_sizes.shape[0]
        bs = x.shape[0]

        device = x.device
        dtype = x.dtype

        patch_len = tgt_sizes[:, 0] * tgt_sizes[:, 1]

        # In eager mode or during capture, adjust the cache size if necessary.
        # We safely use .max().item() here as it only runs once per graph capture
        # and sees the maximum possible sizes.
        max_h = tgt_sizes[:, 0].max().item()
        max_w = tgt_sizes[:, 1].max().item()
        if max_h > self.max_size[0] or max_w > self.max_size[1]:
            self._adjust_pos_cache(tgt_sizes, device=device)

        max_patch_len = x.shape[1]

        temporal_pos_emb = False
        temporal_ids_flatten = None
        if temporal_ids is not None:
            if isinstance(temporal_ids, torch.Tensor):
                temporal_ids_flatten = temporal_ids
                max_temporal_size = temporal_ids_flatten.max().item()
            else:
                # example: [[-1], [-1], [2, 6, 9]]
                temporal_ids_flatten = list(chain.from_iterable(temporal_ids))
                max_temporal_size = max(temporal_ids_flatten, default=0)
                temporal_ids_flatten = torch.tensor(
                    temporal_ids_flatten, dtype=torch.long, device=device
                )

            if max_temporal_size > -1:
                temporal_pos_emb = True
            if max_temporal_size > self.max_temporal_size:
                self._adjust_temporal_pos_cache(max_temporal_size, device)

        seq_idx = torch.arange(max_patch_len, device=device).unsqueeze(0)

        tgt_w = tgt_sizes[:, 1].unsqueeze(1)
        tgt_w = torch.clamp(tgt_w, min=1)

        h_idx = seq_idx // tgt_w
        w_idx = seq_idx % tgt_w

        h_idx = torch.clamp(h_idx, max=self.pos_embed.shape[0] - 1)
        w_idx = torch.clamp(w_idx, max=self.pos_embed.shape[1] - 1)

        pos_embed_2d = self.pos_embed[h_idx, w_idx].to(dtype)

        key_padding_mask = seq_idx >= patch_len.unsqueeze(1)

        pos_embed_2d = torch.where(
            key_padding_mask.unsqueeze(-1), torch.zeros_like(pos_embed_2d), pos_embed_2d
        ).permute(1, 0, 2)  # (L, bs, D)

        x, _ = self.kv_proj(x)  # B * L * D
        x = self.ln_kv(x).permute(1, 0, 2)  # L * B * D
        q = self.ln_q(self.query)  # Q * D

        k = x + pos_embed_2d
        v = x

        if temporal_pos_emb:
            # temporal_ids_flatten is 1D tensor of shape (bs,)
            pos_embed_temporal = torch.where(
                (temporal_ids_flatten == -1).unsqueeze(-1),
                torch.zeros(self.embed_dim, dtype=dtype, device=device),
                self.temporal_pos_embed[torch.clamp(temporal_ids_flatten, min=0)].to(
                    dtype
                ),
            )  # (bs, D)

            k += pos_embed_temporal.unsqueeze(0)  # (L, bs, D) + (1, bs, D)

            # skip the cross-frame merge loop when compiling into a CUDA graph
            # (which occurs when temporal_ids is passed as a flat Tensor) because
            # dynamic sequence lengths and batch sizes cannot be captured.
            if not isinstance(temporal_ids, torch.Tensor):
                bs = len(temporal_ids)
                merge_k = []
                merge_v = []
                merge_key_padding_mask = []

                start = 0
                for tp in temporal_ids:
                    end = start + len(tp)
                    # L * (end-start) * D -> (end-start) * L * D
                    # -> 1 * L*(end-start) * D
                    merge_k.append(
                        k[:, start:end, :].permute(1, 0, 2).reshape(-1, self.embed_dim)
                    )
                    merge_v.append(
                        v[:, start:end, :].permute(1, 0, 2).reshape(-1, self.embed_dim)
                    )
                    merge_key_padding_mask.append(
                        key_padding_mask[start:end, :].reshape(-1, 1)
                    )

                    start = end

                k = torch.nn.utils.rnn.pad_sequence(
                    merge_k, batch_first=True, padding_value=0.0
                ).permute(1, 0, 2)  # L*(end-start)
                v = torch.nn.utils.rnn.pad_sequence(
                    merge_v, batch_first=True, padding_value=0.0
                ).permute(1, 0, 2)  # L*(end-start)
                key_padding_mask = torch.nn.utils.rnn.pad_sequence(
                    merge_key_padding_mask, batch_first=True, padding_value=True
                ).squeeze(-1)

        out = self.attn(
            self._repeat(q, bs),  # Q * B * D
            k,  # L * B * D +  L * B * D
            v,
            key_padding_mask=key_padding_mask,
        )[0]
        #  out: Q * B * D
        x = out.permute(1, 0, 2)  # B * Q * D

        x = self.ln_post(x)
        x = x @ self.proj
        return x
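
To make the temporal_ids handling concrete, here is a standalone sketch of just the flattening step in forward, reusing the example format from the source comment above ([[-1], [-1], [2, 6, 9]]): plain images carry a single -1 entry, while a high-refresh-rate video lists a temporal index per packed frame group.

import torch
from itertools import chain

temporal_ids = [[-1], [-1], [2, 6, 9]]          # example format from the comment above
flat = list(chain.from_iterable(temporal_ids))   # [-1, -1, 2, 6, 9]
max_temporal_size = max(flat, default=0)         # 9
temporal_pos_emb = max_temporal_size > -1        # True -> temporal embeddings are added to k
flat_tensor = torch.tensor(flat, dtype=torch.long)
assert temporal_pos_emb and flat_tensor.tolist() == [-1, -1, 2, 6, 9]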

get_1d_sincos_pos_embed_from_temporal_size

get_1d_sincos_pos_embed_from_temporal_size(
    embed_dim: int, pos: ndarray
)

  • embed_dim: output dimension for each position
  • pos: a list of positions to be encoded: size (M,)
  • out: (M, D)

Source code in vllm/model_executor/models/minicpmv.py
def get_1d_sincos_pos_embed_from_temporal_size(
    self, embed_dim: int, pos: np.ndarray
):
    """
    embed_dim: output dimension for each position
    pos: a list of positions to be encoded: size (M,)
    out: (M, D)
    """
    assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float32)
    omega /= embed_dim / 2.0
    omega = 1.0 / 10000**omega  # (D/2,)

    pos = pos.reshape(-1)  # (M,)
    out = np.einsum("m,d->md", pos, omega)  # (M, D/2), outer product

    emb_sin = np.sin(out)  # (M, D/2)
    emb_cos = np.cos(out)  # (M, D/2)

    emb = np.concatenate([emb_sin, emb_cos], axis=1)  # (M, D)
    return emb
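
A quick standalone check of the shape contract (M positions in, an (M, D) embedding out), re-implementing the same sincos formula with NumPy:

import numpy as np

def sincos_1d(embed_dim: int, pos: np.ndarray) -> np.ndarray:
    omega = np.arange(embed_dim // 2, dtype=np.float32) / (embed_dim / 2.0)
    omega = 1.0 / 10000 ** omega                               # (D/2,)
    out = np.einsum("m,d->md", pos.reshape(-1), omega)         # (M, D/2)
    return np.concatenate([np.sin(out), np.cos(out)], axis=1)  # (M, D)

emb = sincos_1d(64, np.arange(5, dtype=np.float32))
assert emb.shape == (5, 64)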

_MiniCPMVEncoderCudaGraphMixin

Bases: SupportsEncoderCudaGraph

SupportsEncoderCudaGraph for MiniCPM-V Idefics2 + resampler (not 2.0).

Source code in vllm/model_executor/models/minicpmv.py
class _MiniCPMVEncoderCudaGraphMixin(SupportsEncoderCudaGraph):
    """SupportsEncoderCudaGraph for MiniCPM-V Idefics2 + resampler (not 2.0)."""

    supports_encoder_cudagraph: ClassVar[Literal[True]] = True

    def _mcpmv_slice_pixel_size(self) -> tuple[int, int]:
        """Return (pixel_height, pixel_width) for each slice fed into vpm.

        Every slice is resized to image_size x image_size pixels before being
        passed to the vision encoder, so this is always (image_size, image_size)
        regardless of max_slice_num.
        """
        image_size = int(self.vpm.embeddings.image_size)
        return image_size, image_size

    def _mcpmv_max_patches_per_slice(self) -> int:
        """Return max patch count per slice for patch_attention_mask sizing.

        The vision encoder divides each (image_size x image_size) slice into
        (image_size // patch_size)^2 patches, so this equals that value.
        """
        image_size = int(self.vpm.embeddings.image_size)
        patch_size = int(self.vpm.embeddings.patch_size)
        return (image_size // patch_size) ** 2

    def _mcpmv_max_slices_cap(
        self,
        token_budget: int,
        max_batch_size: int,
        max_frames_per_batch: int,
    ) -> int:
        max_slice_num = int(getattr(self.config, "max_slice_num", 9))
        query_num = max(1, int(self.config.query_num))
        # Each slice produces query_num output tokens, so token_budget caps slices.
        max_slices_by_token_budget = max(1, token_budget // query_num)
        # Buffer must fit the largest possible input from either modality.
        # Image batch:  max_batch_size images × (max_slice_num + 1) slices each.
        # Video batch:  max_frames_per_batch frames × (max_slice_num + 1) slices each.
        # Both modalities share the same captured graph, so take the larger of the two.
        max_slices_by_content = max_batch_size * (max_slice_num + 1)
        if self.version in {(2, 6), (4, 0), (4, 5)} and max_frames_per_batch > 0:
            max_slices_by_content = max(
                max_slices_by_content,
                max_frames_per_batch * (max_slice_num + 1),
            )
        return max(1, min(max_slices_by_token_budget, max_slices_by_content))

    def get_encoder_cudagraph_config(self) -> EncoderCudaGraphConfig:
        buffer_keys = [
            _MINICPMV_CUDAGRAPH_BUF_KEY_TGT_SIZES,
            _MINICPMV_CUDAGRAPH_BUF_KEY_PATCH_MASK,
        ]
        if self.version == (4, 5):
            buffer_keys.append(_MINICPMV_CUDAGRAPH_BUF_KEY_TEMPORAL_IDS)
        # Video is only supported from 2.6 onward.
        modalities = ["image"]
        if self.version in {(2, 6), (4, 0), (4, 5)}:
            modalities.append("video")
        return EncoderCudaGraphConfig(
            modalities=modalities,
            input_key_by_modality={
                "image": _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE,
                "video": _MINICPMV_CUDAGRAPH_FLAT_KEY_VIDEO,
            },
            buffer_keys=buffer_keys,
            out_hidden_size=int(self.embed_dim),
        )

    def get_input_modality(self, mm_kwargs: dict[str, Any]) -> str:
        if "video_pixel_values" in mm_kwargs:
            return "video"
        return "image"

    def get_max_frames_per_video(self) -> int:
        info = MULTIMODAL_REGISTRY.get_processing_info(self.vllm_config.model_config)
        return int(
            info.get_num_frames_with_most_features(
                seq_len=self.vllm_config.model_config.max_model_len,
                mm_counts={
                    "video": self.multimodal_config.get_limit_per_prompt("video")
                },
            )
        )

    def get_encoder_cudagraph_budget_range(
        self, vllm_config: VllmConfig
    ) -> tuple[int, int]:
        # Each slice produces exactly query_num resampler output tokens.
        # A thumbnail-only image has 1 slice, so query_num is the smallest
        # possible encoder output and the natural minimum budget.
        min_budget = int(self.config.query_num)
        max_budget = min(
            vllm_config.scheduler_config.max_num_batched_tokens,
            vllm_config.model_config.max_model_len,
        )
        return (min_budget, max_budget)

    def get_encoder_cudagraph_num_items(self, mm_kwargs: dict[str, Any]) -> int:
        video = self.get_input_modality(mm_kwargs) == "video"
        pixel_values_key = "video_pixel_values" if video else "pixel_values"
        return len(mm_kwargs[pixel_values_key])

    def get_encoder_cudagraph_per_item_output_tokens(
        self, mm_kwargs: dict[str, Any]
    ) -> list[int]:
        query_num = int(self.config.query_num)
        video = self.get_input_modality(mm_kwargs) == "video"
        pixel_values_key = "video_pixel_values" if video else "pixel_values"
        pixel_values: list[list[torch.Tensor]] = mm_kwargs[pixel_values_key]
        return [len(img) * query_num for img in pixel_values]

    def get_encoder_cudagraph_per_item_input_sizes(
        self, mm_kwargs: dict[str, Any]
    ) -> list[int]:
        video = self.get_input_modality(mm_kwargs) == "video"
        tgt_sizes = _mcpmv_tgt_sizes_tensor(mm_kwargs, video=video)
        pixel_values: list[list[torch.Tensor]] = mm_kwargs[
            "video_pixel_values" if video else "pixel_values"
        ]
        slice_counts = [len(img) for img in pixel_values]
        patch_sums = tgt_sizes.prod(-1)
        return [
            int(group.sum().item()) for group in torch.split(patch_sums, slice_counts)
        ]

    def select_encoder_cudagraph_items(
        self,
        mm_kwargs: dict[str, Any],
        indices: list[int],
    ) -> dict[str, Any]:
        video = self.get_input_modality(mm_kwargs) == "video"
        pixel_values_key = "video_pixel_values" if video else "pixel_values"
        tgt_key = "video_tgt_sizes" if video else "tgt_sizes"
        flat_key = (
            _MINICPMV_CUDAGRAPH_FLAT_KEY_VIDEO
            if video
            else _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE
        )
        device = next(self.vpm.parameters()).device
        pixel_h, pixel_w = self._mcpmv_slice_pixel_size()

        pixel_values: list[list[torch.Tensor]] = mm_kwargs[pixel_values_key]
        tgt_sizes = _mcpmv_tgt_sizes_tensor(mm_kwargs, video=video)

        # Base dict without the stale flat buffer (recomputed at the end).
        subset = {k: v for k, v in mm_kwargs.items() if k != flat_key}

        if not indices:
            subset.update(
                {
                    pixel_values_key: [],
                    tgt_key: torch.zeros((0, 2), dtype=torch.long, device=device),
                    flat_key: torch.zeros(
                        (0, 3 * pixel_h * pixel_w), device=device, dtype=torch.float32
                    ),
                }
            )
            if self.version == (4, 5):
                subset["temporal_ids"] = None
            return subset

        # Select per-item nested slices and matching tgt_sizes rows.
        slice_counts = [len(item_slices) for item_slices in pixel_values]
        tgt_groups = torch.split(tgt_sizes, slice_counts)
        selected_pixel_values = [pixel_values[i] for i in indices]
        selected_tgt_sizes = torch.cat([tgt_groups[i] for i in indices], dim=0)

        # Pack ragged [3, H, W_i] slices into fixed [num_slices, 3*H*W] buffer.
        selected_slices = flatten_2d_lists(selected_pixel_values)
        packed_flat_pixels = _mcpmv_pack_flat_pixels(
            selected_slices,
            pixel_height=pixel_h,
            pixel_width=pixel_w,
            max_num_slices=len(selected_slices),
            device=selected_slices[0].device,
            dtype=selected_slices[0].dtype,
        )

        subset.update(
            {
                pixel_values_key: selected_pixel_values,
                tgt_key: selected_tgt_sizes,
                flat_key: packed_flat_pixels,
            }
        )
        if self.version == (4, 5):
            temporal_ids = mm_kwargs.get("temporal_ids")
            if temporal_ids is not None:
                subset["temporal_ids"] = [temporal_ids[i] for i in indices]
        return subset

    def prepare_encoder_cudagraph_capture_inputs(
        self,
        token_budget: int,
        max_batch_size: int,
        max_frames_per_batch: int,
        device: torch.device,
        dtype: torch.dtype,
    ) -> EncoderCudaGraphCaptureInputs:
        pixel_h, pixel_w = self._mcpmv_slice_pixel_size()
        max_patches = self._mcpmv_max_patches_per_slice()
        max_num_slices = self._mcpmv_max_slices_cap(
            token_budget,
            max_batch_size,
            max_frames_per_batch,
        )
        flat_dim = 3 * pixel_h * pixel_w
        flat_pixel_buffer = torch.zeros(
            (max_num_slices, flat_dim), device=device, dtype=dtype
        )
        patch_hw = pixel_h // int(self.vpm.embeddings.patch_size)
        dummy_tgt_sizes = torch.full(
            (max_num_slices, 2), patch_hw, dtype=torch.long, device=device
        )
        dummy_patch_mask = torch.ones(
            (max_num_slices, max_patches), dtype=torch.bool, device=device
        )
        buffers: dict[str, torch.Tensor] = {
            _MINICPMV_CUDAGRAPH_BUF_KEY_TGT_SIZES: dummy_tgt_sizes,
            _MINICPMV_CUDAGRAPH_BUF_KEY_PATCH_MASK: dummy_patch_mask,
        }
        if self.version == (4, 5):
            buffers[_MINICPMV_CUDAGRAPH_BUF_KEY_TEMPORAL_IDS] = torch.full(
                (max_num_slices,), -1, dtype=torch.long, device=device
            )
        mm_kwargs: dict[str, Any] = {
            _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE: flat_pixel_buffer,
        }
        return EncoderCudaGraphCaptureInputs(mm_kwargs=mm_kwargs, buffers=buffers)

    def prepare_encoder_cudagraph_replay_buffers(
        self,
        mm_kwargs: dict[str, Any],
        max_batch_size: int,
        max_frames_per_batch: int,
    ) -> EncoderCudaGraphReplayBuffers:
        _ = max_frames_per_batch
        _ = max_batch_size
        video = self.get_input_modality(mm_kwargs) == "video"
        max_patches = self._mcpmv_max_patches_per_slice()
        device = next(self.vpm.parameters()).device
        # After select_encoder_cudagraph_items, tgt_sizes contains exactly one
        # row per selected slice, so its length equals the total slice count.
        tgt_sizes = _mcpmv_tgt_sizes_tensor(mm_kwargs, video=video).to(
            device=device, dtype=torch.long
        )

        patches_per_slice = tgt_sizes.prod(-1).clamp(max=max_patches)
        col_idx = torch.arange(max_patches, device=device)
        patch_attention_mask = col_idx.unsqueeze(0) < patches_per_slice.unsqueeze(1)

        buffers: dict[str, torch.Tensor] = {
            _MINICPMV_CUDAGRAPH_BUF_KEY_TGT_SIZES: tgt_sizes.clone(),
            _MINICPMV_CUDAGRAPH_BUF_KEY_PATCH_MASK: patch_attention_mask,
        }
        if self.version == (4, 5):
            temporal_ids = mm_kwargs.get("temporal_ids")
            if temporal_ids is not None:
                # temporal_ids is list[list[int]] (per-image, per-slice).
                flat_ids = torch.tensor(
                    flatten_2d_lists(temporal_ids), dtype=torch.long, device=device
                )
            else:
                flat_ids = torch.full(
                    (len(tgt_sizes),), -1, dtype=torch.long, device=device
                )
            buffers[_MINICPMV_CUDAGRAPH_BUF_KEY_TEMPORAL_IDS] = flat_ids
        return EncoderCudaGraphReplayBuffers(buffers=buffers)

    def encoder_cudagraph_forward(
        self,
        mm_kwargs: dict[str, Any],
        buffers: dict[str, torch.Tensor],
    ) -> torch.Tensor:
        modality = self.get_input_modality(mm_kwargs)
        flat_key = (
            _MINICPMV_CUDAGRAPH_FLAT_KEY_VIDEO
            if modality == "video"
            else _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE
        )
        flat_pixel_buffer = mm_kwargs[flat_key]
        pixel_h, pixel_w = self._mcpmv_slice_pixel_size()
        max_num_slices, flat_dim = flat_pixel_buffer.shape
        assert flat_dim == 3 * pixel_h * pixel_w
        all_pixel_values = flat_pixel_buffer.view(max_num_slices, 3, pixel_h, pixel_w)

        tgt_sizes = buffers[_MINICPMV_CUDAGRAPH_BUF_KEY_TGT_SIZES]
        patch_attention_mask = buffers[
            _MINICPMV_CUDAGRAPH_BUF_KEY_PATCH_MASK
        ].unsqueeze(1)

        # v2.5 vpm does not accept tgt_sizes.
        vpm_tgt_sizes = None if self.version == (2, 5) else tgt_sizes
        vision_embedding = self.vpm(
            all_pixel_values,
            patch_attention_mask=patch_attention_mask,
            tgt_sizes=vpm_tgt_sizes,
        )

        if self.version == (4, 5):
            temporal_ids = buffers[_MINICPMV_CUDAGRAPH_BUF_KEY_TEMPORAL_IDS]
            resampler_out = self.resampler(vision_embedding, tgt_sizes, temporal_ids)
        else:
            resampler_out = self.resampler(vision_embedding, tgt_sizes)

        query_num = int(self.config.query_num)
        return resampler_out.reshape(max_num_slices * query_num, int(self.embed_dim))

    def encoder_eager_forward(self, mm_kwargs: dict[str, Any]) -> torch.Tensor:
        """Eager encoder path; returns ``(total_tokens, embed_dim)`` like
        ``encoder_cudagraph_forward``.

        Called by the manager only for images/videos that exceed all token
        budgets (single-item batches), so ``segments`` always has exactly one
        element in practice.  Version-specific logic (e.g. temporal embeddings
        for v4.5) is handled transparently by the polymorphic dispatch inside
        ``get_vision_hidden_states``.
        """
        mm_kwargs_no_flat = {
            k: v
            for k, v in mm_kwargs.items()
            if k
            not in (
                _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE,
                _MINICPMV_CUDAGRAPH_FLAT_KEY_VIDEO,
            )
        }
        modalities = self._parse_and_validate_multimodal_inputs(**mm_kwargs_no_flat)
        segments: list[torch.Tensor] = []
        embed_dim = self.embed_dim
        for modality in modalities:
            if modality == "images":
                image_input = modalities["images"]
                image_embeddings = self.get_vision_hidden_states(image_input)
                segments.append(image_embeddings.reshape(-1, embed_dim))
            elif modality == "videos":
                video_input = modalities["videos"]
                video_embeddings = self.get_vision_hidden_states(video_input)
                segments.append(video_embeddings.reshape(-1, embed_dim))
        if not segments:
            raise RuntimeError(
                "MiniCPM-V encoder cudagraph eager path expects pixel_values "
                "or video_pixel_values"
            )
        return torch.cat(segments, dim=0)
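
To make the slice-cap arithmetic in _mcpmv_max_slices_cap concrete, here is a standalone re-computation with illustrative numbers (query_num=64, max_slice_num=9, a 4096-token budget, 4 images, 16 video frames; none of these are read from a real config, and a video-capable version is assumed so both content bounds apply):

query_num, max_slice_num = 64, 9                  # illustrative values
token_budget, max_batch_size, max_frames_per_batch = 4096, 4, 16

by_token_budget = max(1, token_budget // query_num)          # 64 slices fit in the budget
by_content = max(
    max_batch_size * (max_slice_num + 1),                     # 40: image batch
    max_frames_per_batch * (max_slice_num + 1),               # 160: video batch
)
cap = max(1, min(by_token_budget, by_content))                # 64
assert cap == 64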

_mcpmv_max_patches_per_slice

_mcpmv_max_patches_per_slice() -> int

Return max patch count per slice for patch_attention_mask sizing.

The vision encoder divides each (image_size x image_size) slice into (image_size // patch_size)^2 patches, so this equals that value.

Source code in vllm/model_executor/models/minicpmv.py
def _mcpmv_max_patches_per_slice(self) -> int:
    """Return max patch count per slice for patch_attention_mask sizing.

    The vision encoder divides each (image_size x image_size) slice into
    (image_size // patch_size)^2 patches, so this equals that value.
    """
    image_size = int(self.vpm.embeddings.image_size)
    patch_size = int(self.vpm.embeddings.patch_size)
    return (image_size // patch_size) ** 2
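
For instance, with illustrative values image_size=980 and patch_size=14 (chosen only so that the per-axis patch count matches the resampler's default max_size of (70, 70)), each slice yields 4900 patches:

image_size, patch_size = 980, 14             # illustrative, not read from a real config
patches_per_axis = image_size // patch_size  # 70
assert patches_per_axis ** 2 == 4900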

_mcpmv_slice_pixel_size

_mcpmv_slice_pixel_size() -> tuple[int, int]

Return (pixel_height, pixel_width) for each slice fed into vpm.

Every slice is resized to image_size x image_size pixels before being passed to the vision encoder, so this is always (image_size, image_size) regardless of max_slice_num.

Source code in vllm/model_executor/models/minicpmv.py
def _mcpmv_slice_pixel_size(self) -> tuple[int, int]:
    """Return (pixel_height, pixel_width) for each slice fed into vpm.

    Every slice is resized to image_size x image_size pixels before being
    passed to the vision encoder, so this is always (image_size, image_size)
    regardless of max_slice_num.
    """
    image_size = int(self.vpm.embeddings.image_size)
    return image_size, image_size

encoder_eager_forward

encoder_eager_forward(mm_kwargs: dict[str, Any]) -> Tensor

Eager encoder path; returns (total_tokens, embed_dim) like encoder_cudagraph_forward.

Called by the manager only for images/videos that exceed all token budgets (single-item batches), so segments always has exactly one element in practice. Version-specific logic (e.g. temporal embeddings for v4.5) is handled transparently by the polymorphic dispatch inside get_vision_hidden_states.

Source code in vllm/model_executor/models/minicpmv.py
def encoder_eager_forward(self, mm_kwargs: dict[str, Any]) -> torch.Tensor:
    """Eager encoder path; returns ``(total_tokens, embed_dim)`` like
    ``encoder_cudagraph_forward``.

    Called by the manager only for images/videos that exceed all token
    budgets (single-item batches), so ``segments`` always has exactly one
    element in practice.  Version-specific logic (e.g. temporal embeddings
    for v4.5) is handled transparently by the polymorphic dispatch inside
    ``get_vision_hidden_states``.
    """
    mm_kwargs_no_flat = {
        k: v
        for k, v in mm_kwargs.items()
        if k
        not in (
            _MINICPMV_CUDAGRAPH_FLAT_KEY_IMAGE,
            _MINICPMV_CUDAGRAPH_FLAT_KEY_VIDEO,
        )
    }
    modalities = self._parse_and_validate_multimodal_inputs(**mm_kwargs_no_flat)
    segments: list[torch.Tensor] = []
    embed_dim = self.embed_dim
    for modality in modalities:
        if modality == "images":
            image_input = modalities["images"]
            image_embeddings = self.get_vision_hidden_states(image_input)
            segments.append(image_embeddings.reshape(-1, embed_dim))
        elif modality == "videos":
            video_input = modalities["videos"]
            video_embeddings = self.get_vision_hidden_states(video_input)
            segments.append(video_embeddings.reshape(-1, embed_dim))
    if not segments:
        raise RuntimeError(
            "MiniCPM-V encoder cudagraph eager path expects pixel_values "
            "or video_pixel_values"
        )
    return torch.cat(segments, dim=0)

_mcpmv_pack_flat_pixels

_mcpmv_pack_flat_pixels(
    slices: list[Tensor],
    *,
    pixel_height: int,
    pixel_width: int,
    max_num_slices: int,
    device: device,
    dtype: dtype,
) -> Tensor

Pack slice tensors into a fixed (max_num_slices, 3*H*W) buffer.

Every slice is image_size × image_size (see _mcpmv_slice_pixel_size), so all rows in the buffer are fully occupied.

Source code in vllm/model_executor/models/minicpmv.py
def _mcpmv_pack_flat_pixels(
    slices: list[torch.Tensor],
    *,
    pixel_height: int,
    pixel_width: int,
    max_num_slices: int,
    device: torch.device,
    dtype: torch.dtype,
) -> torch.Tensor:
    """Pack slice tensors into a fixed ``(max_num_slices, 3*H*W)`` buffer.

    Every slice is ``image_size × image_size`` (see ``_mcpmv_slice_pixel_size``),
    so all rows in the buffer are fully occupied.
    """
    flat_dim = 3 * pixel_height * pixel_width
    packed = torch.zeros((max_num_slices, flat_dim), device=device, dtype=dtype)
    n = min(len(slices), max_num_slices)
    if n > 0:
        packed[:n] = torch.stack(slices[:n]).reshape(n, -1).to(dtype=dtype)
    return packed
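
A minimal usage sketch of this helper with made-up slice sizes (3 × 8 × 8), showing that unused rows of the capture buffer stay zero:

import torch

slices = [torch.rand(3, 8, 8) for _ in range(2)]
packed = _mcpmv_pack_flat_pixels(
    slices,
    pixel_height=8,
    pixel_width=8,
    max_num_slices=4,              # capture buffer sized larger than the batch
    device=torch.device("cpu"),
    dtype=torch.float32,
)
assert packed.shape == (4, 3 * 8 * 8)
assert torch.count_nonzero(packed[2:]) == 0   # rows beyond the 2 real slices remain zero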