
import warnings
from typing import Any, Optional, Union

from ...configuration_utils import PretrainedConfig, layer_type_validation
from ...modeling_rope_utils import rope_config_validation
from ...utils import logging
from ..siglip import SiglipVisionConfig


logger = logging.get_logger(__name__)


class Gemma3TextConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Gemma3TextModel`]. It is used to instantiate a Gemma3Text
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma3Text-7B.
e.g. [google/gemma3_text-7b](https://huggingface.co/google/gemma3_text-7b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
    vocab_size (`int`, *optional*, defaults to 262208):
        Vocabulary size of the Gemma3Text model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`Gemma3TextModel`].
    hidden_size (`int`, *optional*, defaults to 2304):
        Dimension of the hidden representations.
    intermediate_size (`int`, *optional*, defaults to 9216):
        Dimension of the MLP representations.
    num_hidden_layers (`int`, *optional*, defaults to 26):
        Number of hidden layers in the Transformer decoder.
    num_attention_heads (`int`, *optional*, defaults to 8):
        Number of attention heads for each attention layer in the Transformer decoder.
    num_key_value_heads (`int`, *optional*, defaults to 4):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details, check out [this
        paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
        `num_attention_heads`.
    head_dim (`int`, *optional*, defaults to 256):
        The attention head dimension.
    hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
        The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
        if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
    max_position_embeddings (`int`, *optional*, defaults to 131072):
        The maximum sequence length that this model might ever be used with.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    rms_norm_eps (`float`, *optional*, defaults to 1e-06):
        The epsilon used by the rms normalization layers.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models). Only
        relevant if `config.is_decoder=True`.
    pad_token_id (`int`, *optional*, defaults to 0):
        Padding token id.
    eos_token_id (`int`, *optional*, defaults to 1):
        End of stream token id.
    bos_token_id (`int`, *optional*, defaults to 2):
        Beginning of stream token id.
    tie_word_embeddings (`bool`, *optional*, defaults to `True`):
        Whether to tie weight embeddings
    rope_theta (`float`, *optional*, defaults to 1000000.0):
        The base period of the RoPE embeddings.
    attention_bias (`bool`, *optional*, defaults to `False`):
        Whether to use a bias in the query, key, value and output projection layers during self-attention.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    query_pre_attn_scalar (`float`, *optional*, defaults to 256):
        Scaling factor used on the attention scores.
    sliding_window (`int`, *optional*, defaults to 4096):
        In Gemma3Text, most layers use sliding window attention while every sixth layer attends globally. This is the size of the sliding window.
    layer_types (`list`, *optional*):
        Attention pattern for each layer. When unset, a pattern alternating sliding and full attention is derived
        automatically (see the example below).
    final_logit_softcapping (`float`, *optional*):
        Scaling factor when applying tanh softcapping on the logits.
    attn_logit_softcapping (`float`, *optional*):
        Scaling factor when applying tanh softcapping on the attention scores.
    rope_scaling (`Dict`, *optional*):
        Dictionary containing the scaling configuration for the RoPE embeddings used in global attention. NOTE: if
        you apply a new rope type and expect the model to work on a longer `max_position_embeddings`, we recommend
        updating this value accordingly; see the short example after this argument list.
        Expected contents:
            `rope_type` (`str`):
                The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                'llama3'], with 'default' being the original RoPE implementation.
            `factor` (`float`, *optional*):
                Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                original maximum pre-trained length.
            `original_max_position_embeddings` (`int`, *optional*):
                Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                pretraining.
            `attention_factor` (`float`, *optional*):
                Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                computation. If unspecified, it defaults to value recommended by the implementation, using the
                `factor` field to infer the suggested value.
            `beta_fast` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                ramp function. If unspecified, it defaults to 32.
            `beta_slow` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                ramp function. If unspecified, it defaults to 1.
            `short_factor` (`list[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `long_factor` (`list[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `low_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
    rope_local_base_freq (`float`, *optional*, defaults to 10000.0):
        The base period of the RoPE embeddings for local attention.
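
For instance, a hypothetical YaRN-style stretch of the global-attention RoPE (the values below are purely
illustrative, not recommended settings) could be requested as:

```python
>>> from transformers import Gemma3TextConfig

>>> config = Gemma3TextConfig(rope_scaling={"rope_type": "yarn", "factor": 8.0})
```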

```python
>>> from transformers import Gemma3TextModel, Gemma3TextConfig
>>> # Initializing a Gemma3Text gemma3_text-7b style configuration
>>> configuration = Gemma3TextConfig()
>>> # Initializing a model from the gemma3_text-7b style configuration
>>> model = Gemma3TextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
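
With the defaults, every sixth decoder layer uses full attention and the remaining layers use sliding-window
attention; the derived `layer_types` can be inspected directly (a minimal illustration of the default pattern):

```python
>>> config = Gemma3TextConfig(num_hidden_layers=6)
>>> config.layer_types
['sliding_attention', 'sliding_attention', 'sliding_attention', 'sliding_attention', 'sliding_attention', 'full_attention']
```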
"""

    model_type = "gemma3_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    base_model_tp_plan = {
        "layers.*.self_attn.q_proj": "colwise",
        "layers.*.self_attn.k_proj": "colwise",
        "layers.*.self_attn.v_proj": "colwise",
        "layers.*.self_attn.o_proj": "rowwise",
        "layers.*.mlp.gate_proj": "colwise",
        "layers.*.mlp.up_proj": "colwise",
        "layers.*.mlp.down_proj": "rowwise",
    }
    base_model_pp_plan = {
        "embed_tokens": (["input_ids"], ["inputs_embeds"]),
        "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
        "norm": (["hidden_states"], ["hidden_states"]),
    }

    def __init__(
        self,
        vocab_size=262_208,
        hidden_size=2304,
        intermediate_size=9216,
        num_hidden_layers=26,
        num_attention_heads=8,
        num_key_value_heads=4,
        head_dim=256,
        hidden_activation="gelu_pytorch_tanh",
        max_position_embeddings=131_072,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=1_000_000.0,
        attention_bias=False,
        attention_dropout=0.0,
        query_pre_attn_scalar=256,
        sliding_window=4096,
        layer_types=None,
        final_logit_softcapping=None,
        attn_logit_softcapping=None,
        rope_scaling=None,
        rope_local_base_freq=10_000.0,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.hidden_activation = hidden_activation
        self.query_pre_attn_scalar = query_pre_attn_scalar
        self.sliding_window = sliding_window
        self.final_logit_softcapping = final_logit_softcapping
        self.attn_logit_softcapping = attn_logit_softcapping
        self.layer_types = layer_types
        self.rope_local_base_freq = rope_local_base_freq
        self.rope_scaling = rope_scaling
        rope_config_validation(self)

        # Backward compatibility: older configs stored the layer pattern as a plain int.
        self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 6)

        if self.layer_types is None:
            # Every `_sliding_window_pattern`-th layer uses full attention; all others use sliding-window attention.
            self.layer_types = [
                "sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention"
                for i in range(self.num_hidden_layers)
            ]
        layer_type_validation(self.layer_types)

    @property
    def sliding_window_pattern(self):
        warnings.warn(
            "The `sliding_window_pattern` attribute is deprecated and will be removed in v4.55.0.",
            FutureWarning,
        )
        return self._sliding_window_pattern

    @sliding_window_pattern.setter
    def sliding_window_pattern(self, value):
        self._sliding_window_pattern = value


class Gemma3Config(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`Gemma3ForConditionalGeneration`]. It is used to instantiate a
Gemma3ForConditionalGeneration according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the PaliGemma-2B.

e.g. [google/gemma-3-4b](https://huggingface.co/google/gemma-3-4b)

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    text_config (`Union[Gemma3TextConfig, dict]`, *optional*):
        The config object of the text backbone; a plain `dict` is also accepted (see the example below).
    vision_config (`Union[AutoConfig, dict]`, *optional*):
        Custom vision config or dict.
    mm_tokens_per_image (`int`, *optional*, defaults to 256):
        The number of tokens per image embedding.
    boi_token_index (`int`, *optional*, defaults to 255999):
        The begin-of-image token index to wrap the image prompt.
    eoi_token_index (`int`, *optional*, defaults to 256000):
        The end-of-image token index to wrap the image prompt.
    image_token_index (`int`, *optional*, defaults to 262144):
        The image token index to encode the image prompt.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.


Example:

```python
>>> from transformers import Gemma3ForConditionalGeneration, Gemma3Config, SiglipVisionConfig, Gemma3TextConfig

>>> # Initializing a Siglip-like vision config
>>> vision_config = SiglipVisionConfig()

>>> # Initializing a Gemma3 Text config
>>> text_config = Gemma3TextConfig()

>>> # Initializing a Gemma3 gemma-3-4b style configuration
>>> configuration = Gemma3Config(text_config=text_config, vision_config=vision_config)

>>> # Initializing a model from the gemma-3-4b style configuration
>>> model = Gemma3ForConditionalGeneration(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
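
>>> # Sub-configs may also be given as plain dicts; they are converted to config objects
>>> # (the small values here are purely illustrative)
>>> configuration = Gemma3Config(
...     text_config={"num_hidden_layers": 2},
...     vision_config={"num_hidden_layers": 2},
... )

>>> # `attribute_map` aliases the token indices, e.g. `image_token_id` -> `image_token_index`
>>> configuration.image_token_id
262144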
```
"""

    model_type = "gemma3"
    attribute_map = {
        "image_token_id": "image_token_index",
        "boi_token_id": "boi_token_index",
        "eoi_token_id": "eoi_token_index",
    }
    sub_configs = {
        "text_config": Gemma3TextConfig,
        "vision_config": SiglipVisionConfig,
    }

    def __init__(
        self,
        text_config: Optional[Union[Gemma3TextConfig, dict[str, Any]]] = None,
        vision_config: Optional[Union[SiglipVisionConfig, dict[str, Any]]] = None,
        mm_tokens_per_image: int = 256,
        boi_token_index: int = 255_999,
        eoi_token_index: int = 256_000,
        image_token_index: int = 262_144,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        if text_config is None:
            text_config = Gemma3TextConfig()
            logger.info("text_config is None, using default Gemma3TextConfig text config.")
        elif isinstance(text_config, dict):
            text_config = Gemma3TextConfig(**text_config)

        if isinstance(vision_config, dict):
            vision_config = SiglipVisionConfig(**vision_config)
        elif vision_config is None:
            vision_config = SiglipVisionConfig()
            logger.info("vision_config is None, using default SiglipVisionConfig vision config.")

        self.text_config = text_config
        self.vision_config = vision_config
        self.mm_tokens_per_image = mm_tokens_per_image
        self.boi_token_index = boi_token_index
        self.eoi_token_index = eoi_token_index
        self.image_token_index = image_token_index
        self.initializer_range = initializer_range

        super().__init__(**kwargs)


__all__ = ["Gemma3Config", "Gemma3TextConfig"]