
from typing import Callable, Optional, Union

import torch
from torch import nn

from transformers.utils.generic import OutputRecorder, check_model_inputs

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...configuration_utils import PretrainedConfig
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_attention_mask_for_sdpa
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPast,
    BaseModelOutputWithPastAndCrossAttentions,
    Seq2SeqLMOutput,
    Seq2SeqModelOutput,
)
from ...modeling_rope_utils import rope_config_validation
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, logging
from ..glm.modeling_glm import GlmAttention, GlmRotaryEmbedding, apply_rotary_pos_emb
from ..llama.modeling_llama import LlamaDecoderLayer, LlamaModel, eager_attention_forward
from ..whisper.modeling_whisper import WhisperModel, shift_tokens_right


logger = logging.get_logger(__name__)


class MoonshineConfig(PretrainedConfig):
    r"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.

Args:
    vocab_size (`int`, *optional*, defaults to 32768):
        Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
        `input_ids` passed when calling [`MoonshineModel`].
    hidden_size (`int`, *optional*, defaults to 288):
        Dimension of the hidden representations.
    intermediate_size (`int`, *optional*, defaults to 1152):
        Dimension of the MLP representations.
    encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
        Number of hidden layers in the Transformer encoder.
    decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
        Number of hidden layers in the Transformer decoder.
    encoder_num_attention_heads (`int`, *optional*, defaults to 8):
        Number of attention heads for each attention layer in the Transformer encoder.
    decoder_num_attention_heads (`int`, *optional*, defaults to 8):
        Number of attention heads for each attention layer in the Transformer decoder.
    encoder_num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details, check out [this
        paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
        `num_attention_heads`.
    decoder_num_key_value_heads (`int`, *optional*):
        This is the number of key_value heads that should be used to implement Grouped Query Attention. If
        `decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
        `decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
        converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
        by meanpooling all the original heads within that group. For more details, check out [this
        paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
        `decoder_num_attention_heads`.
    pad_head_dim_to_multiple_of (`int`, *optional*):
        Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
        optimized attention implementations.
    encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
        The non-linear activation function (function or string) in the encoder.
    decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
        The non-linear activation function (function or string) in the decoder.
    max_position_embeddings (`int`, *optional*, defaults to 512):
        The maximum sequence length that this model might ever be used with.
    initializer_range (`float`, *optional*, defaults to 0.02):
        The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
    decoder_start_token_id (`int`, *optional*, defaults to 1):
        Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
        are provided to the `generate` function. It is used to guide the model's generation process depending on
        the task.
    use_cache (`bool`, *optional*, defaults to `True`):
        Whether or not the model should return the last key/values attentions (not used by all models).
    rope_theta (`float`, *optional*, defaults to 10000.0):
        The base period of the RoPE embeddings.
    rope_scaling (`Dict`, *optional*):
        Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
        and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
        accordingly.
        Expected contents:
            `rope_type` (`str`):
                The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                'llama3'], with 'default' being the original RoPE implementation.
            `factor` (`float`, *optional*):
                Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                original maximum pre-trained length.
            `original_max_position_embeddings` (`int`, *optional*):
                Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                pretraining.
            `attention_factor` (`float`, *optional*):
                Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                computation. If unspecified, it defaults to value recommended by the implementation, using the
                `factor` field to infer the suggested value.
            `beta_fast` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                ramp function. If unspecified, it defaults to 32.
            `beta_slow` (`float`, *optional*):
                Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                ramp function. If unspecified, it defaults to 1.
            `short_factor` (`list[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `long_factor` (`list[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                size divided by the number of attention heads divided by 2
            `low_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
            `high_freq_factor` (`float`, *optional*):
                Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
    partial_rotary_factor (`float`, *optional*, defaults to 0.9):
        Percentage of the query and keys which will have rotary embedding.
    is_encoder_decoder (`bool`, *optional*, defaults to `True`):
        Whether the model is used as an encoder/decoder or not.
    attention_bias (`bool`, *optional*, defaults to `False`):
        Whether to use a bias in the query, key, value and output projection layers during self-attention.
    attention_dropout (`float`, *optional*, defaults to 0.0):
        The dropout ratio for the attention probabilities.
    bos_token_id (`int`, *optional*, defaults to 1):
        Denotes beginning of sequences token id.
    eos_token_id (`int`, *optional*, defaults to 2):
        Denotes end of sequences token id.

Example:

```python
>>> from transformers import MoonshineModel, MoonshineConfig

>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig()

>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
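
>>> # A grouped-query attention variant (illustrative values only, not a released checkpoint):
>>> # every pair of decoder query heads shares one key/value head
>>> gqa_configuration = MoonshineConfig(decoder_num_attention_heads=8, decoder_num_key_value_heads=4)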
```
    """

    model_type = "moonshine"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_key_value_heads": "encoder_num_key_value_heads",
        "num_attention_heads": "encoder_num_attention_heads",
        "num_hidden_layers": "encoder_num_hidden_layers",
    }

    def __init__(
        self,
        vocab_size=32768,
        hidden_size=288,
        intermediate_size=1152,
        encoder_num_hidden_layers=6,
        decoder_num_hidden_layers=6,
        encoder_num_attention_heads=8,
        decoder_num_attention_heads=8,
        encoder_num_key_value_heads=None,
        decoder_num_key_value_heads=None,
        pad_head_dim_to_multiple_of=None,
        encoder_hidden_act="gelu",
        decoder_hidden_act="silu",
        max_position_embeddings=512,
        initializer_range=0.02,
        decoder_start_token_id=1,
        use_cache=True,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=0.9,
        is_encoder_decoder=True,
        attention_bias=False,
        attention_dropout=0.0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.encoder_num_hidden_layers = encoder_num_hidden_layers
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.encoder_num_attention_heads = encoder_num_attention_heads
        self.decoder_num_attention_heads = decoder_num_attention_heads

        if encoder_num_key_value_heads is None:
            encoder_num_key_value_heads = encoder_num_attention_heads
        self.encoder_num_key_value_heads = encoder_num_key_value_heads

        if decoder_num_key_value_heads is None:
            decoder_num_key_value_heads = decoder_num_attention_heads
        self.decoder_num_key_value_heads = decoder_num_key_value_heads

        self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
        self.encoder_hidden_act = encoder_hidden_act
        self.decoder_hidden_act = decoder_hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self.is_encoder_decoder = is_encoder_decoder
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        # Validate the rotary position embedding parameters
        rope_config_validation(self)

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )


class MoonshineEncoderMLP(nn.Module):
    def __init__(self, config, hidden_act):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class MoonshineDecoderMLP(nn.Module):
    def __init__(self, config, hidden_act):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size * 2)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states, gate = hidden_states.chunk(2, dim=-1)
        hidden_states = self.activation_fn(gate) * hidden_states
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class MoonshineAttention(GlmAttention):
    def __init__(
        self,
        config: MoonshineConfig,
        layer_idx: int,
        is_causal: bool,
        num_attention_heads: int,
        num_key_value_heads: int,
    ):
        config.update({"num_attention_heads": num_attention_heads, "num_key_value_heads": num_key_value_heads})
        super().__init__(config, layer_idx)
        self.is_causal = is_causal
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)

        # Pad the head dimension to the next multiple of `pad_head_dim_to_multiple_of`,
        # which is necessary for certain optimized attention implementations.
        if self.config.pad_head_dim_to_multiple_of is not None:
            target_multiple = self.config.pad_head_dim_to_multiple_of
            target_head_dim = target_multiple * ((self.head_dim + target_multiple - 1) // target_multiple)
            self.head_dim_padding = target_head_dim - self.head_dim
        else:
            self.head_dim_padding = 0

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        key_value_states: Optional[torch.Tensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        bsz, q_len = hidden_states.shape[:-1]

        query_states = self.q_proj(hidden_states).view(bsz, q_len, -1, self.head_dim).transpose(1, 2)

        is_cross_attention = key_value_states is not None
        if past_key_value is not None:
            is_updated = past_key_value.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value states from the cache
                past_key_value.is_updated[self.layer_idx] = True
                past_key_value = past_key_value.cross_attention_cache
            else:
                past_key_value = past_key_value.self_attention_cache

        # use key_value_states if cross attention
        current_states = key_value_states if key_value_states is not None else hidden_states
        if is_cross_attention and past_key_value is not None and is_updated:
            # reuse the cached cross-attention key/value states
            key_states = past_key_value.layers[self.layer_idx].keys
            value_states = past_key_value.layers[self.layer_idx].values
        else:
            key_states = (
                self.k_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
            )
            value_states = (
                self.v_proj(current_states).view(bsz, -1, self.config.num_key_value_heads, self.head_dim).transpose(1, 2)
            )
            if is_cross_attention and past_key_value is not None:
                key_states, value_states = past_key_value.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )

        if not is_cross_attention:
            cos, sin = position_embeddings
            query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
            if past_key_value is not None:
                # sin and cos are specific to RoPE models; cache_position is needed for the static cache
                cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
                key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        is_causal = self.is_causal and attention_mask is None and q_len > 1

        if self.head_dim_padding > 0:
            query_states = torch.nn.functional.pad(query_states, (0, self.head_dim_padding))
            key_states = torch.nn.functional.pad(key_states, (0, self.head_dim_padding))
            value_states = torch.nn.functional.pad(value_states, (0, self.head_dim_padding))

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            is_causal=is_causal,
            **kwargs,
        )

        if self.head_dim_padding > 0:
            attn_output = attn_output[..., : -self.head_dim_padding]

        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights


class MoonshineRotaryEmbedding(GlmRotaryEmbedding):
    pass


class MoonshineEncoderLayer(LlamaDecoderLayer):
    def __init__(self, config: MoonshineConfig, layer_idx: int):
        super().__init__(config, layer_idx)

        self.self_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=False,
            num_attention_heads=config.encoder_num_attention_heads,
            num_key_value_heads=config.encoder_num_key_value_heads,
        )

        self.mlp = MoonshineEncoderMLP(config, config.encoder_hidden_act)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)


class MoonshineDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: MoonshineConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.hidden_size = config.hidden_size

        self.self_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=True,
            num_attention_heads=config.decoder_num_attention_heads,
            num_key_value_heads=config.decoder_num_key_value_heads,
        )
        self.encoder_attn = MoonshineAttention(
            config=config,
            layer_idx=layer_idx,
            is_causal=False,
            num_attention_heads=config.decoder_num_attention_heads,
            num_key_value_heads=config.decoder_num_key_value_heads,
        )

        self.mlp = MoonshineDecoderMLP(config, config.decoder_hidden_act)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, bias=False)
        self.final_layernorm = nn.LayerNorm(config.hidden_size, bias=False)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        encoder_position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        encoder_position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.Tensor:
        # Self attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Cross attention
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.post_attention_layernorm(hidden_states)
            hidden_states, _ = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                past_key_value=past_key_value,
                use_cache=use_cache,
            )
            hidden_states = residual + hidden_states

        # Fully connected
        residual = hidden_states
        hidden_states = self.final_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


@auto_docstring
class MoonshinePreTrainedModel(PreTrainedModel):
    config: MoonshineConfig
    base_model_prefix = "model"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["MoonshineEncoderLayer", "MoonshineDecoderLayer"]
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True

    def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        output_conv1_length = int((input_lengths - 127) / 64 + 1)
        output_conv2_length = int((output_conv1_length - 7) / 3 + 1)
        output_conv3_length = int((output_conv2_length - 3) / 2 + 1)
        return output_conv3_length


class MoonshineEncoder(MoonshinePreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_num_hidden_layers* layers. Each layer is a
    [`MoonshineEncoderLayer`].

    Args:
        config: MoonshineConfig
    """

    main_input_name = "input_values"
    _can_record_outputs = {
        "attentions": MoonshineEncoderLayer,
        "hidden_states": MoonshineEncoderLayer,
    }

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.conv1 = nn.Conv1d(1, embed_dim, kernel_size=127, stride=64, bias=False)
        self.conv2 = nn.Conv1d(embed_dim, 2 * embed_dim, kernel_size=7, stride=3)
        self.conv3 = nn.Conv1d(2 * embed_dim, embed_dim, kernel_size=3, stride=2)
        self.groupnorm = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=1e-5)
        self.rotary_emb = MoonshineRotaryEmbedding(config=config)
        self.layers = nn.ModuleList(
            [MoonshineEncoderLayer(config, idx) for idx in range(config.encoder_num_hidden_layers)]
        )
        self.layer_norm = nn.LayerNorm(embed_dim, bias=False)

        self.gradient_checkpointing = False
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.conv1

    def set_input_embeddings(self, value: nn.Module):
        self.conv1 = value

    @check_model_inputs
    def forward(
        self,
        input_values: torch.FloatTensor,
        attention_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        r"""
        Args:
            input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
                Float values of the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
                `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec library (`pip install torchcodec`) or
                the soundfile library (`pip install soundfile`). To prepare the array into
                `input_values`, the [`AutoFeatureExtractor`] should be used for padding
                and conversion into a tensor of type `torch.FloatTensor`.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding indices in `input_values`. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
        """
        input_values = input_values.unsqueeze(1)

        # convolutional frontend
        hidden_states = nn.functional.tanh(self.conv1(input_values))
        hidden_states = self.groupnorm(hidden_states)
        hidden_states = nn.functional.gelu(self.conv2(hidden_states))
        hidden_states = nn.functional.gelu(self.conv3(hidden_states))
        hidden_states = hidden_states.permute(0, 2, 1)

        if attention_mask is not None:
            # downsample the attention mask to the length of the conv output
            mask_len = self._get_feat_extract_output_lengths(attention_mask.shape[-1])
            downsample_stride = 64 * 3 * 2  # conv strides
            attention_mask = attention_mask[..., ::downsample_stride][..., :mask_len]

            if self.config._attn_implementation == "flash_attention_2":
                attention_mask = attention_mask if (attention_mask == 0.0).any() else None
            elif self.config._attn_implementation == "sdpa":
                attention_mask = _prepare_4d_attention_mask_for_sdpa(attention_mask, hidden_states.dtype)
            else:
                attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        position_ids = torch.arange(0, hidden_states.shape[1], device=hidden_states.device).unsqueeze(0)
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.layer_norm(hidden_states)
        return BaseModelOutput(last_hidden_state=hidden_states)


class MoonshineDecoder(LlamaModel):
    main_input_name = "input_ids"

    _can_record_outputs = {
        "attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="self_attn"),
        "hidden_states": MoonshineDecoderLayer,
        "cross_attentions": OutputRecorder(MoonshineAttention, index=1, layer_name="encoder_attn"),
    }

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.norm = nn.LayerNorm(config.hidden_size, bias=False)
        self.layers = nn.ModuleList(
            [MoonshineDecoderLayer(config, layer_idx) for layer_idx in range(config.decoder_num_hidden_layers)]
        )

    @check_model_inputs
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Union[tuple, BaseModelOutputWithPast]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
            of the decoder.
        encoder_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding indices in `encoder_hidden_states`. Mask values selected in `[0, 1]`:
            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
            [What are attention masks?](../glossary#attention-mask)
        """
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            self_attention_cache = DynamicCache()
            cross_attention_cache = DynamicCache()
            past_key_values = EncoderDecoderCache(self_attention_cache, cross_attention_cache)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds

        # position embeddings shared across decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        if encoder_attention_mask is not None:
            # downsample the audio-level mask to the encoder output length
            mask_len = encoder_hidden_states.shape[1]
            downsample_stride = 64 * 3 * 2  # conv strides
            encoder_attention_mask = encoder_attention_mask[..., ::downsample_stride][..., :mask_len]
            if self.config._attn_implementation == "flash_attention_2":
                encoder_attention_mask = encoder_attention_mask if (encoder_attention_mask == 0.0).any() else None
            elif self.config._attn_implementation == "sdpa":
                encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa(
                    encoder_attention_mask, inputs_embeds.dtype, inputs_embeds.shape[1]
                )
            else:
                encoder_attention_mask = _prepare_4d_attention_mask(
                    encoder_attention_mask, inputs_embeds.dtype, inputs_embeds.shape[1]
                )

        for decoder_layer in self.layers:
            hidden_states = decoder_layer(
                hidden_states,
                causal_mask,
                encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


class MoonshineModel(WhisperModel):
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None,
        decoder_position_ids: Optional[tuple[torch.LongTensor]] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqModelOutput:
        r"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
    Float values of the raw speech waveform. Raw speech waveform can be
    obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
    `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or
    the soundfile library (`pip install soundfile`). To prepare the array into
    `input_values`, the [`AutoFeatureExtractor`] should be used for padding
    and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
    Indices of positions of each input sequence tokens in the position embeddings.
    Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`

Example:

```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, MoonshineModel
>>> from datasets import load_dataset

>>> model = MoonshineModel.from_pretrained("UsefulSensors/moonshine-tiny")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
>>> last_hidden_state = model(input_values, decoder_input_ids=decoder_input_ids).last_hidden_state
>>> list(last_hidden_state.shape)
[1, 2, 288]
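
>>> # The conv frontend downsamples the waveform (roughly one encoder frame per 384 input samples).
>>> # Illustrative use of the length helper defined on `MoonshinePreTrainedModel`:
>>> num_frames = model._get_feat_extract_output_lengths(input_values.shape[-1])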
```
        """
        if encoder_outputs is None:
            encoder_outputs = self.encoder(input_values, attention_mask=attention_mask, **kwargs)

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            position_ids=decoder_position_ids,
            encoder_hidden_states=encoder_outputs.last_hidden_state,
            encoder_attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )

        return Seq2SeqModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The Moonshine Model with a language modeling head. Can be used for automatic speech recognition.
    """
)
class MoonshineForConditionalGeneration(MoonshinePreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["proj_out.weight"]

    def __init__(self, config: MoonshineConfig):
        super().__init__(config)
        self.model = MoonshineModel(config)
        self.proj_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_encoder(self):
        return self.model.get_encoder()

    def get_decoder(self):
        return self.model.get_decoder()

    def get_output_embeddings(self):
        return self.proj_out

    def set_output_embeddings(self, new_embeddings):
        self.proj_out = new_embeddings

    def get_input_embeddings(self) -> nn.Module:
        return self.model.get_input_embeddings()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.LongTensor] = None,
        encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
        past_key_values: Optional[Union[EncoderDecoderCache, tuple[torch.FloatTensor]]] = None,
        decoder_inputs_embeds: Optional[tuple[torch.FloatTensor]] = None,
        decoder_position_ids: Optional[tuple[torch.LongTensor]] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> Seq2SeqLMOutput:
        r"""
input_values (`torch.FloatTensor` of shape `(batch_size, audio_length)`):
    Float values of the raw speech waveform. Raw speech waveform can be
    obtained by loading a `.flac` or `.wav` audio file into an array of type `list[float]`, a
    `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec libary (`pip install torchcodec`) or
    the soundfile library (`pip install soundfile`). To prepare the array into
    `input_values`, the [`AutoFeatureExtractor`] should be used for padding
    and conversion into a tensor of type `torch.FloatTensor`.
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`):
    Indices of positions of each input sequence tokens in the position embeddings.
    Used to calculate the position embeddings up to `config.decoder_config.max_position_embeddings`

Example:

```python
>>> import torch
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration
>>> from datasets import load_dataset

>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
>>> input_values = inputs.input_values

>>> generated_ids = model.generate(input_values, max_new_tokens=100)

>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> transcription
'Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
```
        """
        if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                labels, self.config.pad_token_id, self.config.decoder_start_token_id
            )

        outputs: Seq2SeqModelOutput = self.model(
            input_values,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            decoder_position_ids=decoder_position_ids,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        logits = self.proj_out(outputs.last_hidden_state)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size)

        return Seq2SeqLMOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )


__all__ = ["MoonshineConfig", "MoonshineModel", "MoonshinePreTrainedModel", "MoonshineForConditionalGeneration"]