
from typing import Optional

import torch
from torch import nn

from ...cache_utils import Cache, DynamicCache
from ...configuration_utils import PretrainedConfig
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_outputs import BaseModelOutputWithPast
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import check_model_inputs
from ..mistral.configuration_mistral import MistralConfig
from ..qwen2.modeling_qwen2 import (
    Qwen2Attention,
    Qwen2DecoderLayer,
    Qwen2ForCausalLM,
    Qwen2ForQuestionAnswering,
    Qwen2ForSequenceClassification,
    Qwen2ForTokenClassification,
    Qwen2MLP,
    Qwen2Model,
    Qwen2PreTrainedModel,
    Qwen2RMSNorm,
    Qwen2RotaryEmbedding,
)


class MinistralConfig(MistralConfig, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MinistralModel`]. It is used to instantiate a
    Ministral model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Ministral-8B-Instruct-2410.

    [mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32000):
            Vocabulary size of the Ministral model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`MinistralModel`]
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://huggingface.co/papers/2305.13245). If it is not specified, it will default to `8`.
        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
            The maximum sequence length that this model might ever be used with. Ministral's sliding window attention
            allows sequences of up to 4096*32 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            The id of the padding token.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 2):
            The id of the "end-of-sequence" token.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether the model's input and output word embeddings should be tied.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*, defaults to 4096):
            Sliding window attention window size. If not specified, it will default to `4096`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        layer_types (`list`, *optional*):
            Attention pattern for each layer.

    ```python
    >>> from transformers import MinistralModel, MinistralConfig

    >>> # Initializing a Ministral 8B style configuration
    >>> configuration = MinistralConfig()

    >>> # Initializing a model from the Ministral 8B style configuration
    >>> model = MinistralModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
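    >>> # Additional sketch (not part of the original example): the documented arguments above can be
    >>> # overridden at construction time, e.g. to build a small configuration for quick tests
    >>> tiny_configuration = MinistralConfig(num_hidden_layers=2, hidden_size=512, intermediate_size=1024)
    >>> tiny_model = MinistralModel(tiny_configuration)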
    ```"""

    model_type = "ministral"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=14336,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="silu",
        max_position_embeddings=4096 * 32,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        sliding_window=4096,
        attention_dropout=0.0,
        layer_types=None,
        **kwargs,
    ):
        PretrainedConfig.__init__(
            self,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.sliding_window = sliding_window
        self.head_dim = head_dim

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = 8

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_dropout = attention_dropout
        self.layer_types = layer_types

        if self.layer_types is None:
            # Every layer uses sliding-window attention when a window is configured, full attention otherwise
            self.layer_types = [
                "sliding_attention" if self.sliding_window is not None else "full_attention"
            ] * num_hidden_layers


class MinistralMLP(Qwen2MLP):
    pass


class MinistralAttention(Qwen2Attention):
    def __init__(self, config: MinistralConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Ministral overrides Qwen2's biased q/k/v projections with bias-free ones
        self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
        self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
        self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)


class MinistralRMSNorm(Qwen2RMSNorm):
    pass


class MinistralDecoderLayer(Qwen2DecoderLayer):
    pass


class MinistralPreTrainedModel(Qwen2PreTrainedModel):
    pass


class MinistralRotaryEmbedding(Qwen2RotaryEmbedding):
    pass


class MinistralModel(Qwen2Model):
    def __init__(self, config: MinistralConfig):
        super().__init__(config)
        # Not needed here: `forward` below always builds both full and sliding-window masks
        del self.has_sliding_layers

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # The attention mask may already be a {layer_type: mask} mapping; otherwise build one mask per layer type
        if not isinstance(causal_mask_mapping := attention_mask, dict):
            mask_kwargs = {
                "config": self.config,
                "input_embeds": inputs_embeds,
                "attention_mask": attention_mask,
                "cache_position": cache_position,
                "past_key_values": past_key_values,
                "position_ids": position_ids,
            }
            causal_mask_mapping = {
                "full_attention": create_causal_mask(**mask_kwargs),
                "sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
            }

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask_mapping[decoder_layer.attention_type],
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


class MinistralForCausalLM(Qwen2ForCausalLM):
    pass


class MinistralForSequenceClassification(Qwen2ForSequenceClassification):
    pass


class MinistralForTokenClassification(Qwen2ForTokenClassification):
    pass


class MinistralForQuestionAnswering(Qwen2ForQuestionAnswering):
    pass


__all__ = [
    "MinistralConfig",
    "MinistralPreTrainedModel",
    "MinistralModel",
    "MinistralForCausalLM",
    "MinistralForSequenceClassification",
    "MinistralForTokenClassification",
    "MinistralForQuestionAnswering",
]