vllm.model_executor.models.flex_olmo

Inference-only FlexOlmo model compatible with HuggingFace weights.
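
A minimal offline-inference sketch with vLLM's LLM API. The checkpoint id below is an assumption, not taken from this module; substitute the actual FlexOlmo repository on the Hugging Face Hub.

from vllm import LLM, SamplingParams

# Hypothetical checkpoint id; replace with a real FlexOlmo repo.
llm = LLM(model="allenai/FlexOlmo-7x7B-1T")
params = SamplingParams(temperature=0.8, max_tokens=64)
outputs = llm.generate(["FlexOlmo is"], params)
print(outputs[0].outputs[0].text)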

logger module-attribute

logger = init_logger(__name__)

FlexOlmoAttention

Bases: OlmoeAttention

Source code in vllm/model_executor/models/flex_olmo.py
class FlexOlmoAttention(OlmoeAttention):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)

        hf_config = vllm_config.model_config.hf_config
        assert isinstance(hf_config, FlexOlmoConfig)

        self.k_norm = RMSNorm(
            self.total_num_kv_heads * self.head_dim, eps=hf_config.rms_norm_eps
        )
        self.q_norm = RMSNorm(
            self.total_num_heads * self.head_dim, eps=hf_config.rms_norm_eps
        )
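
Relative to the OlmoeAttention base, this subclass re-creates the Q/K RMSNorms over the full projected width (num_heads * head_dim), using the eps from FlexOlmoConfig. A standalone sketch of that normalization step, with illustrative sizes rather than the real FlexOlmo dimensions:

import torch

num_heads, head_dim, eps = 4, 8, 1e-5
q = torch.randn(2, num_heads * head_dim)   # (num_tokens, full q width)
weight = torch.ones(num_heads * head_dim)  # RMSNorm's learned scale

# RMSNorm over the full width, i.e. across all heads at once, before
# the projection is reshaped into per-head slices.
rms = q.pow(2).mean(dim=-1, keepdim=True).add(eps).rsqrt()
q_normed = q * rms * weight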

k_norm instance-attribute

k_norm = RMSNorm(
    total_num_kv_heads * head_dim, eps=rms_norm_eps
)

q_norm instance-attribute

q_norm = RMSNorm(
    total_num_heads * head_dim, eps=rms_norm_eps
)

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/flex_olmo.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__(vllm_config=vllm_config, prefix=prefix)

    hf_config = vllm_config.model_config.hf_config
    assert isinstance(hf_config, FlexOlmoConfig)

    self.k_norm = RMSNorm(
        self.total_num_kv_heads * self.head_dim, eps=hf_config.rms_norm_eps
    )
    self.q_norm = RMSNorm(
        self.total_num_heads * self.head_dim, eps=hf_config.rms_norm_eps
    )

FlexOlmoDecoderLayer

Bases: Module

Source code in vllm/model_executor/models/flex_olmo.py
class FlexOlmoDecoderLayer(nn.Module):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
        super().__init__()
        hf_config = vllm_config.model_config.hf_config
        assert isinstance(hf_config, FlexOlmoConfig)

        self.self_attn = FlexOlmoAttention(
            vllm_config=vllm_config, prefix=f"{prefix}.self_attn"
        )
        self.post_attention_layernorm = RMSNorm(
            hf_config.hidden_size, eps=hf_config.rms_norm_eps
        )
        self.post_feedforward_layernorm = RMSNorm(
            hf_config.hidden_size, eps=hf_config.rms_norm_eps
        )

        self.mlp = FlexOlmoMoE(vllm_config=vllm_config, prefix=f"{prefix}.mlp")

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        residual: Optional[torch.Tensor],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        # Attention block.
        residual = hidden_states
        hidden_states = self.self_attn(positions, hidden_states)
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = hidden_states + residual

        # MLP block.
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states, None

mlp instance-attribute

mlp = FlexOlmoMoE(
    vllm_config=vllm_config, prefix=f"{prefix}.mlp"
)

post_attention_layernorm instance-attribute

post_attention_layernorm = RMSNorm(
    hidden_size, eps=rms_norm_eps
)

post_feedforward_layernorm instance-attribute

post_feedforward_layernorm = RMSNorm(
    hidden_size, eps=rms_norm_eps
)

self_attn instance-attribute

self_attn = FlexOlmoAttention(
    vllm_config=vllm_config, prefix=f"{prefix}.self_attn"
)

__init__

__init__(
    *, vllm_config: VllmConfig, prefix: str = ""
) -> None
Source code in vllm/model_executor/models/flex_olmo.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None:
    super().__init__()
    hf_config = vllm_config.model_config.hf_config
    assert isinstance(hf_config, FlexOlmoConfig)

    self.self_attn = FlexOlmoAttention(
        vllm_config=vllm_config, prefix=f"{prefix}.self_attn"
    )
    self.post_attention_layernorm = RMSNorm(
        hf_config.hidden_size, eps=hf_config.rms_norm_eps
    )
    self.post_feedforward_layernorm = RMSNorm(
        hf_config.hidden_size, eps=hf_config.rms_norm_eps
    )

    self.mlp = FlexOlmoMoE(vllm_config=vllm_config, prefix=f"{prefix}.mlp")

forward

forward(
    positions: Tensor,
    hidden_states: Tensor,
    residual: Optional[Tensor],
) -> tuple[Tensor, Optional[Tensor]]
Source code in vllm/model_executor/models/flex_olmo.py
def forward(
    self,
    positions: torch.Tensor,
    hidden_states: torch.Tensor,
    residual: Optional[torch.Tensor],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    # Attention block.
    residual = hidden_states
    hidden_states = self.self_attn(positions, hidden_states)
    hidden_states = self.post_attention_layernorm(hidden_states)
    hidden_states = hidden_states + residual

    # MLP block.
    residual = hidden_states
    hidden_states = self.mlp(hidden_states)
    hidden_states = self.post_feedforward_layernorm(hidden_states)
    hidden_states = residual + hidden_states
    return hidden_states, None
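
Note the ordering: each sub-layer's output is normalized before the residual is added (post-norm placement), rather than the more common pre-norm arrangement where the input is normalized first. A condensed, illustrative comparison of the two orderings:

# Illustrative only; sublayer stands for attention or the MoE MLP.
def pre_norm_block(x, sublayer, norm):
    # Common pre-norm: normalize the input, then add the residual.
    return x + sublayer(norm(x))

def post_norm_block(x, sublayer, norm):
    # The ordering in forward above: normalize the output, then add.
    return x + norm(sublayer(x))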

FlexOlmoForCausalLM

Bases: OlmoeForCausalLM

Source code in vllm/model_executor/models/flex_olmo.py
class FlexOlmoForCausalLM(OlmoeForCausalLM):
    fall_back_to_pt_during_load = False

    def __init__(
        self,
        *,
        vllm_config: VllmConfig,
        prefix: str = "",
        layer_type: type[nn.Module] = FlexOlmoDecoderLayer,
    ):
        super().__init__(vllm_config=vllm_config, prefix=prefix, layer_type=layer_type)
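
The layer_type keyword is the same hook FlexOlmoForCausalLM itself uses on OlmoeForCausalLM. A hedged sketch of how a further subclass could swap in its own decoder layer; MyDecoderLayer is hypothetical:

class MyDecoderLayer(FlexOlmoDecoderLayer):
    # Hypothetical variant; must keep the same constructor signature.
    pass

class MyFlexOlmoForCausalLM(FlexOlmoForCausalLM):
    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(
            vllm_config=vllm_config, prefix=prefix, layer_type=MyDecoderLayer
        )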

fall_back_to_pt_during_load class-attribute instance-attribute

fall_back_to_pt_during_load = False

__init__

__init__(
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    layer_type: type[Module] = FlexOlmoDecoderLayer,
)
Source code in vllm/model_executor/models/flex_olmo.py
def __init__(
    self,
    *,
    vllm_config: VllmConfig,
    prefix: str = "",
    layer_type: type[nn.Module] = FlexOlmoDecoderLayer,
):
    super().__init__(vllm_config=vllm_config, prefix=prefix, layer_type=layer_type)

FlexOlmoMoE

Bases: Module

A tensor-parallel MoE implementation for FlexOlmo that shards each expert across all ranks.

Each expert's weights are split across ranks, a fused MoE kernel computes the expert outputs, and the partial results are then reduced across ranks.
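
The experts are configured with renormalize=False, so the softmax probabilities of the selected top-k experts are used as gate weights without being rescaled to sum to one. A numeric sketch of that gating, assuming softmax-then-top-k routing:

import torch

router_logits = torch.tensor([[2.0, 0.5, 1.0, -1.0]])  # (tokens, experts)
probs = router_logits.softmax(dim=-1)
topk_probs, topk_ids = probs.topk(k=2, dim=-1)
# With renormalize=True the weights would be divided by their sum;
# here they are used as-is, so the gate weights sum to less than one.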

Source code in vllm/model_executor/models/flex_olmo.py
class FlexOlmoMoE(nn.Module):
    """A tensor-parallel MoE implementation for FlexOlmo that shards each expert
    across all ranks.

    Each expert's weights are split across ranks, a fused MoE kernel
    computes the expert outputs, and the partial results are then reduced
    across ranks.
    """

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

        hf_config = vllm_config.model_config.hf_config
        assert isinstance(hf_config, FlexOlmoConfig)

        tp_size = get_tensor_model_parallel_world_size()

        # Gate always runs at half / full precision for now.
        self.gate = ReplicatedLinear(
            hf_config.hidden_size,
            hf_config.num_experts,
            bias=False,
            return_bias=False,
            quant_config=None,
            prefix=f"{prefix}.gate",
        )

        # Fused MoE experts, sharded across tensor-parallel ranks.
        self.experts = FusedMoE(
            num_experts=hf_config.num_experts,
            top_k=hf_config.num_experts_per_tok,
            hidden_size=hf_config.hidden_size,
            intermediate_size=hf_config.intermediate_size,
            reduce_results=True,
            renormalize=False,
            quant_config=None,
            tp_size=tp_size,
            prefix=f"{prefix}.experts",
        )

        self.top_k = hf_config.num_experts_per_tok

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # NOTE: hidden_states can have either 1D or 2D shape.
        orig_shape = hidden_states.shape
        hidden_dim = hidden_states.shape[-1]
        hidden_states = hidden_states.view(-1, hidden_dim)

        # router_logits: (num_tokens, n_experts)
        router_logits = self.gate(hidden_states)
        # Warning: The experts mutate the hidden state input! This messes up
        # basic things like the residual stream.
        final_hidden_states = self.experts(
            hidden_states=hidden_states.detach().clone(),
            router_logits=router_logits.float(),
        )

        return final_hidden_states.view(orig_shape)
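
The detach().clone() in forward guards against the fused experts' in-place writes corrupting the residual stream, as the warning comment notes. A minimal illustration of the aliasing hazard it avoids:

import torch

hidden = torch.ones(2, 4)
residual = hidden             # aliases the same storage
hidden.mul_(0.0)              # simulate a kernel that mutates its input
assert residual.sum() == 0    # the saved residual was silently zeroed

hidden = torch.ones(2, 4)
safe = hidden.detach().clone()
safe.mul_(0.0)                # mutation now hits the copy only
assert hidden.sum() == 8      # residual stream preserved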

experts instance-attribute

experts = FusedMoE(
    num_experts=num_experts,
    top_k=num_experts_per_tok,
    hidden_size=hidden_size,
    intermediate_size=intermediate_size,
    reduce_results=True,
    renormalize=False,
    quant_config=None,
    tp_size=tp_size,
    prefix=f"{prefix}.experts",
)

gate instance-attribute

gate = ReplicatedLinear(
    hidden_size,
    num_experts,
    bias=False,
    return_bias=False,
    quant_config=None,
    prefix=f"{prefix}.gate",
)

top_k instance-attribute

top_k = num_experts_per_tok

__init__

__init__(*, vllm_config: VllmConfig, prefix: str = '')
Source code in vllm/model_executor/models/flex_olmo.py
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
    super().__init__()

    hf_config = vllm_config.model_config.hf_config
    assert isinstance(hf_config, FlexOlmoConfig)

    tp_size = get_tensor_model_parallel_world_size()

    # Gate always runs at half / full precision for now.
    self.gate = ReplicatedLinear(
        hf_config.hidden_size,
        hf_config.num_experts,
        bias=False,
        return_bias=False,
        quant_config=None,
        prefix=f"{prefix}.gate",
    )

    # Fused MoE experts, sharded across tensor-parallel ranks.
    self.experts = FusedMoE(
        num_experts=hf_config.num_experts,
        top_k=hf_config.num_experts_per_tok,
        hidden_size=hf_config.hidden_size,
        intermediate_size=hf_config.intermediate_size,
        reduce_results=True,
        renormalize=False,
        quant_config=None,
        tp_size=tp_size,
        prefix=f"{prefix}.experts",
    )

    self.top_k = hf_config.num_experts_per_tok

forward

forward(hidden_states: Tensor) -> Tensor
Source code in vllm/model_executor/models/flex_olmo.py
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
    # NOTE: hidden_states can have either 1D or 2D shape.
    orig_shape = hidden_states.shape
    hidden_dim = hidden_states.shape[-1]
    hidden_states = hidden_states.view(-1, hidden_dim)

    # router_logits: (num_tokens, n_experts)
    router_logits = self.gate(hidden_states)
    # Warning: The experts mutate the hidden state input! This messes up
    # basic things like the residual stream.
    final_hidden_states = self.experts(
        hidden_states=hidden_states.detach().clone(),
        router_logits=router_logits.float(),
    )

    return final_hidden_states.view(orig_shape)