
    <h                     t   S SK r S SKrS SKJr  S SKJrJrJr  S SKr	S SK
r
S SKJr  S SKJr  SSKJr  SSKJr  SSKJr  SS	KJrJr  SS
KJr  SSKJr  SSKJrJrJrJrJ r   SSK!J"r"J#r#  SSK$J%r%  SSK&J'r'J(r(J)r)  SSK*J+r+  \(" 5       (       a  SSK,J-r-  \)R\                  " \/5      r0\\'" SS9 " S S\5      5       5       r1 " S S\Rd                  5      r3 " S S\Rd                  5      r4 " S S\5      r5 " S S\5      r6 " S S \5      r7 " S! S"\Rd                  5      r8 " S# S$\Rd                  5      r9   SSS%\Rd                  S&\
Rt                  S'\
Rt                  S(\
Rt                  S)\\
Rt                     S*\\;   S+\;S,\\
Rt                     4S- jjr< " S. S/\Rd                  5      r= " S0 S1\Rd                  5      r> " S2 S3\5      r? " S4 S5\Rd                  5      r@ " S6 S7\Rd                  5      rA " S8 S9\5      rB " S: S;\Rd                  5      rC " S< S=\Rd                  5      rD\' " S> S?\#5      5       rE  STS@\F\G\G4   SA\;SB\GS)\\
R                     SC\GSD\	R                  4SE jjrJ\ rK\' " SF SG\E5      5       rL\'" SHS9 " SI SJ\E5      5       rMSKrN\'" SLS9 " SM SN\E5      5       rO\'" SOS9 " SP SQ\E5      5       rP/ SRQrQg)U    N)	dataclass)CallableOptionalUnion)CrossEntropyLoss   )ACT2FN)is_deepspeed_zero3_enabled)is_fsdp_managed_module)_prepare_4d_attention_mask#_prepare_4d_attention_mask_for_sdpa)FlashAttentionKwargs)GradientCheckpointingLayer)BaseModelOutputCausalLMOutputModelOutputSequenceClassifierOutputWav2Vec2BaseModelOutput)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)auto_docstringis_torch_flex_attn_availablelogging   )UniSpeechConfig)make_flex_block_causal_maskzh
    Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.
    )custom_introc                      \ rS rSr% SrSr\\R                     \	S'   Sr
\\R                     \	S'   Sr\\R                     \	S'   Sr\\R                     \	S'   Sr\\\R                        \	S'   Sr\\\R                        \	S	'   S
rg)UniSpeechForPreTrainingOutput:   a  
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
    Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the [official
    paper](https://arxiv.org/pdf/2006.11477.pdf) . (classification) loss.
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
    Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
    projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
    Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
    target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
    The perplexity of the codevector distribution, used to measure the diversity of the codebook.
Nlossprojected_statesprojected_quantized_statescodevector_perplexityhidden_states
attentions )__name__
__module____qualname____firstlineno____doc__r"   r   torchFloatTensor__annotations__r#   r$   r%   r&   tupler'   __static_attributes__r(       h/var/www/html/shao/venv/lib/python3.13/site-packages/transformers/models/unispeech/modeling_unispeech.pyr    r    :   s     )-D(5$$
%,48hu0018>B):): ;B9=8E$5$56=8<M8E%"3"345<59Ju00129r3   r    c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )UniSpeechSamePadLayerW   c                 R   > [         TU ]  5         US-  S:X  a  SU l        g SU l        g )N   r   r   )super__init__num_pad_remove)selfnum_conv_pos_embeddings	__class__s     r4   r;   UniSpeechSamePadLayer.__init__X   s)    #:Q#>!#Car3   c                 X    U R                   S:  a  US S 2S S 2S U R                   * 24   nU$ Nr   r<   r=   r&   s     r4   forwardUniSpeechSamePadLayer.forward\   s6    ")!Q0F43F3F2F0F*FGMr3   rC   r)   r*   r+   r,   r;   rE   r2   __classcell__r?   s   @r4   r6   r6   W   s    K r3   r6   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ ) UniSpeechPositionalConvEmbeddingb   c                   > [         TU ]  5         [        R                  " UR                  UR                  UR
                  UR
                  S-  UR                  S9U l        [        R                  R                  n[        [        R                  R                  S5      (       a$  [        R                  R                  R                  n[        5       (       Ga%  SS KnUR                  R                  U R                  R                   SS9   U" U R                  SSS9U l        S S S 5        [        U R                  S5      (       aU  U R                  R                  R                   R"                  nU R                  R                  R                   R$                  nO,U R                  R&                  nU R                  R(                  nUR                  R+                  X5        UR                  R+                  X5        OU" U R                  SSS9U l        [-        UR
                  5      U l        [0        UR2                     U l        g ! , (       d  f       GN,= f)	Nr9   )kernel_sizepaddinggroupsweight_normr   )modifier_rankweight)namedimparametrizations)r:   r;   nnConv1dhidden_sizer>   num_conv_pos_embedding_groupsconvutilsrQ   hasattrrV   r
   	deepspeedzeroGatheredParametersrS   	original0	original1weight_gweight_vregister_external_parameterr6   rO   r	   feat_extract_activation
activation)r=   configrQ   r^   rc   rd   r?   s         r4   r;   )UniSpeechPositionalConvEmbedding.__init__c   s   II6622a777
	 hh**288,,m<<((33??K%''224993C3CST2U'		aH	 Vtyy"4559955<<FF9955<<FF99--99--NN66tFNN66tF#DIIH!DDI,V-K-KL !?!?@ VUs   I
Ic                     UR                  SS5      nU R                  U5      nU R                  U5      nU R                  U5      nUR                  SS5      nU$ )Nr   r9   )	transposer[   rO   rg   rD   s     r4   rE   (UniSpeechPositionalConvEmbedding.forward   sV    %//15		-0]36%//15r3   )rg   r[   rO   rG   rI   s   @r4   rK   rK   b   s    AB r3   rK   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechNoLayerNormConvLayer   c                 b  > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [        UR                     U l        g )Nr   r   rN   stridebias)r:   r;   conv_dimin_conv_dimout_conv_dimrW   rX   conv_kernelconv_stride	conv_biasr[   r	   rf   rg   r=   rh   layer_idr?   s      r4   r;   &UniSpeechNoLayerNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 !!?!?@r3   c                 J    U R                  U5      nU R                  U5      nU$ N)r[   rg   rD   s     r4   rE   %UniSpeechNoLayerNormConvLayer.forward   s$    		-06r3   )rg   r[   ru   rv   r   rG   rI   s   @r4   rn   rn      s    A r3   rn   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechLayerNormConvLayer   c                   > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [
        R                  " U R                  SS9U l        [        UR                     U l        g )Nr   r   rq   T)elementwise_affine)r:   r;   rt   ru   rv   rW   rX   rw   rx   ry   r[   	LayerNorm
layer_normr	   rf   rg   rz   s      r4   r;   $UniSpeechLayerNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 ,,t'8'8TR !?!?@r3   c                     U R                  U5      nUR                  SS5      nU R                  U5      nUR                  SS5      nU R                  U5      nU$ )N)r[   rk   r   rg   rD   s     r4   rE   #UniSpeechLayerNormConvLayer.forward   sV    		-0%//B76%//B76r3   rg   r[   ru   r   rv   r   rG   rI   s   @r4   r   r      s    A r3   r   c                   2   ^  \ rS rSrSU 4S jjrS rSrU =r$ )UniSpeechGroupNormConvLayer   c                   > [         TU ]  5         US:  a  UR                  US-
     OSU l        UR                  U   U l        [
        R                  " U R                  U R                  UR                  U   UR                  U   UR                  S9U l
        [        UR                     U l        [
        R                  " U R                  U R                  SS9U l        g )Nr   r   rq   T)
num_groupsnum_channelsaffine)r:   r;   rt   ru   rv   rW   rX   rw   rx   ry   r[   r	   rf   rg   	GroupNormr   rz   s      r4   r;   $UniSpeechGroupNormConvLayer.__init__   s    <DqL6??8a<8a"OOH5II**84%%h/!!
	 !!?!?@,,$2C2CRVRcRclpqr3   c                 l    U R                  U5      nU R                  U5      nU R                  U5      nU$ r~   )r[   r   rg   rD   s     r4   rE   #UniSpeechGroupNormConvLayer.forward   s2    		-066r3   r   r   rG   rI   s   @r4   r   r      s    r  r3   r   c                   8   ^  \ rS rSrSrU 4S jrS rS rSrU =r	$ )UniSpeechFeatureEncoder   z.Construct the features from raw audio waveformc           	        > [         TU ]  5         UR                  S:X  a?  [        USS9/[	        UR
                  S-
  5       Vs/ sH  n[        XS-   S9PM     sn-   nOUUR                  S:X  a,  [	        UR
                  5       Vs/ sH  n[        XS9PM     nnO[        SUR                   S35      e[        R                  " U5      U l        SU l        S	U l        g s  snf s  snf )
Ngroupr   )r{   r   layerz`config.feat_extract_norm` is z), but has to be one of ['group', 'layer']FT)r:   r;   feat_extract_normr   rangenum_feat_extract_layersrn   r   
ValueErrorrW   
ModuleListconv_layersgradient_checkpointing_requires_grad)r=   rh   ir   r?   s       r4   r;    UniSpeechFeatureEncoder.__init__   s    ##w.6vJKv==ABOBA .f1uEBO K %%0INvOmOmInInA+F?In  K 01I1I0JJst  ==5&+#"O
s   CC#c                 N    U R                  5        H
  nSUl        M     SU l        g NF)
parametersrequires_gradr   r=   params     r4   _freeze_parameters*UniSpeechFeatureEncoder._freeze_parameters   s#    __&E"'E '#r3   c                     US S 2S 4   nU R                   (       a  U R                  (       a  SUl        U R                   H  nU" U5      nM     U$ )NT)r   trainingr   r   )r=   input_valuesr&   
conv_layers       r4   rE   UniSpeechFeatureEncoder.forward   sK    $QW- 4==*.M'**J&}5M + r3   )r   r   r   )
r)   r*   r+   r,   r-   r;   r   rE   r2   rH   rI   s   @r4   r   r      s    8#($

 
r3   r   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )UniSpeechFeatureProjection   c                 4  > [         TU ]  5         [        R                  " UR                  S   UR
                  S9U l        [        R                  " UR                  S   UR                  5      U l	        [        R                  " UR                  5      U l        g )Nr   eps)r:   r;   rW   r   rt   layer_norm_epsr   LinearrY   
projectionDropoutfeat_proj_dropoutdropoutr=   rh   r?   s     r4   r;   #UniSpeechFeatureProjection.__init__  sf    ,,vr':@U@UV))FOOB$79K9KLzz&":":;r3   c                 n    U R                  U5      nU R                  U5      nU R                  U5      nX4$ r~   )r   r   r   )r=   r&   norm_hidden_statess      r4   rE   "UniSpeechFeatureProjection.forward  s7    !__];(:;]300r3   )r   r   r   rG   rI   s   @r4   r   r      s    <1 1r3   r   modulequerykeyvalueattention_maskscalingr   	head_maskc                    Uc  UR                  S5      S-  n[        R                  " XR                  SS5      5      U-  n	Ub  X-   n	[        R
                  R                  U	SS9n	Ub  XR                  SSSS5      -  n	[        R
                  R                  XU R                  S9n	[        R                  " X5      n
U
R                  SS5      R                  5       n
X4$ )Nr         r9   r   rU   r   )pr   )sizer.   matmulrk   rW   
functionalsoftmaxviewr   r   
contiguous)r   r   r   r   r   r   r   r   kwargsattn_weightsattn_outputs              r4   eager_attention_forwardr     s     **R.D(<<}}Q':;gEL!#4==((2(>L#nnQAq&AA==((6??([L,,|3K''1-88:K$$r3   c                   Z  ^  \ rS rSrSr     SS\S\S\S\S\S\S	\\	   4U 4S
 jjjr
    SS\R                  S\\R                     S\\R                     S\\R                     S\\   S\\   S\\R                  \\R                     \\\R                        4   4S jjrSrU =r$ )UniSpeechAttentioni-  z=Multi-headed attention from 'Attention Is All You Need' paper	embed_dim	num_headsr   
is_decoderrs   	is_causalrh   c                   > [         TU ]  5         Xl        X l        X0l        X-  U l        Xpl        U R
                  U-  U R                  :w  a  [        SU R                   SU S35      eU R
                  S-  U l        X@l	        X`l
        [        R                  " XUS9U l        [        R                  " XUS9U l        [        R                  " XUS9U l        [        R                  " XUS9U l        g )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).r   )rs   )r:   r;   r   r   r   head_dimrh   r   r   r   r   rW   r   k_projv_projq_projout_proj)	r=   r   r   r   r   rs   r   rh   r?   s	           r4   r;   UniSpeechAttention.__init__0  s     	""!.MMI%$..8MdnnM]$YKr3  }}d*$"ii	4@ii	4@ii	4@		)TBr3   r&   key_value_statesr   layer_head_maskoutput_attentionsr   returnc                     USLnUR                   SS u  pU(       a  UR                   S   OU	n
XSU R                  4nXSU R                  4nU R                  U5      R                  " U6 R	                  SS5      nU(       a  UOUnU R                  U5      R                  " U6 R	                  SS5      nU R                  U5      R                  " U6 R	                  SS5      n[        nU R                  R                  S:w  a  [        U R                  R                     nU" U UUUU4U R                  (       d  SOU R                  U R                  UUS.UD6u  nnUR                  XS5      R                  5       nU R!                  U5      nUUS4$ )z#Input shape: Batch x Time x ChannelNr   r   r9   eager        )r   r   r   r   )shaper   r   r   rk   r   r   r   rh   _attn_implementationr   r   r   r   reshaper   r   )r=   r&   r   r   r   r   r   is_cross_attentionbsztgt_lensrc_lenq_input_shapekv_input_shapequery_statescurrent_states
key_statesvalue_statesattention_interfacer   r   s                       r4   rE   UniSpeechAttention.forwardO  s    .T9 %**3B//A"((+wr4==9DMM: {{=166FPPQRTUV-?)][[055~FPPQRTUV
{{>277HRRSTVWX(?;;++w6"9$++:Z:Z"[$7%
  $}}C$,,LL/%%
 %
!\ "))#;FFHmmK0L$..r3   )rh   r   r   r   r   r   r   r   r   r   r   r   )r   FTFN)NNNF)r)   r*   r+   r,   r-   intfloatboolr   r   r;   r.   Tensorr   r   r1   rE   r2   rH   rI   s   @r4   r   r   -  s    G  ,0CC C 	C
 C C C )C CD 481526,13/||3/ #5<<03/ !.	3/
 "%,,/3/ $D>3/ -.3/ 
u||Xell3XeELL>Q5RR	S3/ 3/r3   r   c                   .   ^  \ rS rSrU 4S jrS rSrU =r$ )UniSpeechFeedForwardi  c                   > [         TU ]  5         [        R                  " UR                  5      U l        [        R                  " UR                  UR                  5      U l	        [        UR                  [        5      (       a  [        UR                     U l        OUR                  U l        [        R                  " UR                  UR                  5      U l        [        R                  " UR                   5      U l        g r~   )r:   r;   rW   r   activation_dropoutintermediate_dropoutr   rY   intermediate_sizeintermediate_dense
isinstance
hidden_actstrr	   intermediate_act_fnoutput_densehidden_dropoutoutput_dropoutr   s     r4   r;   UniSpeechFeedForward.__init__  s    $&JJv/H/H$I!"$))F,>,>@X@X"Yf''--'-f.?.?'@D$'-'8'8D$IIf&>&>@R@RS jj)>)>?r3   c                     U R                  U5      nU R                  U5      nU R                  U5      nU R                  U5      nU R	                  U5      nU$ r~   )r	  r  r  r  r  rD   s     r4   rE   UniSpeechFeedForward.forward  sX    //>00?11-@))-8++M:r3   )r  r	  r  r  r  rG   rI   s   @r4   r  r    s    @ r3   r  c                   2   ^  \ rS rSrU 4S jrSS jrSrU =r$ )UniSpeechEncoderLayeri  c                   > [         TU ]  5         [        UR                  UR                  UR
                  SUS9U l        [        R                  " UR                  5      U l
        [        R                  " UR                  UR                  S9U l        [        U5      U l        [        R                  " UR                  UR                  S9U l        g )NFr   r   r   r   rh   r   )r:   r;   r   rY   num_attention_headsattention_dropout	attentionrW   r   r  r   r   r   r   r  feed_forwardfinal_layer_normr   s     r4   r;   UniSpeechEncoderLayer.__init__  s    +((00,,
 zz&"7"78,,v'9'9v?T?TU08 "V-?-?VEZEZ [r3   c                     UnU R                  XUS9u  pnU R                  U5      nXA-   nU R                  U5      nXR                  U5      -   nU R	                  U5      nU4nU(       a  Xu4-  nU$ Nr   r   )r  r   r   r  r  r=   r&   r   r   attn_residualr   _outputss           r4   rE   UniSpeechEncoderLayer.forward  s    %)-L] *8 *
&Q ]3%56%(9(9-(HH--m< "&Gr3   )r  r   r  r  r   r   rG   rI   s   @r4   r  r    s    \ r3   r  c                      ^  \ rS rSrU 4S jr    SS\R                  S\\R                     S\	S\	S\	4
S	 jjr
S\\R                  S4   S
\R                  4S jrSrU =r$ )UniSpeechEncoderi  c                   > [         TU ]  5         Xl        [        U5      U l        [
        R                  " UR                  UR                  S9U l	        [
        R                  " UR                  5      U l        [
        R                  " [        UR                  5       Vs/ sH  n[!        U5      PM     sn5      U l        SU l        g s  snf Nr   F)r:   r;   rh   rK   pos_conv_embedrW   r   rY   r   r   r   r  r   r   r   num_hidden_layersr  layersr   r=   rh   r#  r?   s      r4   r;   UniSpeechEncoder.__init__  s    >vF,,v'9'9v?T?TUzz&"7"78mmERXRjRjLk$lLkq%:6%BLk$lm&+# %m    CNr&   r   r   output_hidden_statesreturn_dictc                    U(       a  SOS nU(       a  SOS nUb4  UR                  S5      R                  SSUR                  S   5      nSX) '   U R                  UU5      nU R	                  U5      n	X-   nU R                  U5      nU R                  U5      n[        5       =(       d    [        U 5      n
U R                   H  nU(       a  Xa4-   n[        R                  " / 5      nU R                  =(       a    XR                  R                  :  nU(       a  U
(       a  U" XUS9nUS   nU(       a  SnU(       d  M|  UWS   4-   nM     U(       a  Xa4-   nU(       d  [        S XU4 5       5      $ [!        UUUS	9$ )
Nr(   r   r   r9   r   r   NNc              3   ,   #    U H  oc  M  Uv   M     g 7fr~   r(   .0vs     r4   	<genexpr>+UniSpeechEncoder.forward.<locals>.<genexpr>        m$[q$[   	last_hidden_stater&   r'   )	unsqueezerepeatr   _update_full_maskr*  r   r   r
   r   r,  r.   randr   rh   	layerdropr1   r   r=   r&   r   r   r0  r1  all_hidden_statesall_self_attentionsexpand_attention_maskposition_embeddingssynced_gpusr   dropout_probabilityskip_the_layerlayer_outputss                  r4   rE   UniSpeechEncoder.forward  s    #7BD$5b4%$2$<$<R$@$G$G1mNaNabcNd$e!45M01//

 #11-@%;6]302R6LT6R[[E#$58H$H! #(**R.!]]Z/B[[EZEZ/ZN![ %!Te! !.a 0 ,  &9]1=M<O&O#' !*   14D Dm]GZ$[mmm++*
 	
r3   inputs_embedsc                 r   Ub  U R                   R                  S:X  a  SU;   a  UnU$ S nU$ U R                   R                  S:X  a  [        XR                  5      nU$ U R                   R                  S:X  a+  [	        U[
        R                  5      (       a
  [        USS9nU$ [        XR                  5      nU$ Nflash_attention_2r   sdpaflex_attentionF)r   	rh   r   r   dtyper
  r.   r  r   r   r=   r   rM  s      r4   r@  "UniSpeechEncoder._update_full_mask      
 %{{//3FF343F  MQ  11V; "E^UhUh!i  115EEnell;;%@[`%aN
  "<NL_L_!`r3   rh   r   r   r   r,  r*  NFFT)r)   r*   r+   r,   r;   r.   tensorr   r  r  rE   r   r@  r2   rH   rI   s   @r4   r'  r'    s    , 26"'%* :
||:
 !.:
  	:

 #:
 :
xellD01 || r3   r'  c                   J   ^  \ rS rSrU 4S jrS\R                  4S jrSrU =r	$ )UniSpeechAttnAdapterLayeri  c                   > [         TU ]  5         UR                  U l        UR                  U l        [        R                  " U R
                  5      U l        [        R                  " U R
                  U R                  5      U l
        [        R                  " 5       U l        [        R                  " U R                  U R
                  5      U l        g)z
Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
up training throughput.
N)r:   r;   adapter_attn_dim	input_dimrY   
hidden_dimrW   r   normr   linear_1ReLUact_fnlinear_2r   s     r4   r;   "UniSpeechAttnAdapterLayer.__init__  s    
 	00 ,,LL1			$//4>>Bggi		$..$//Br3   r&   c                     U R                  U5      nU R                  U5      nU R                  U5      nU R                  U5      nU$ r~   )ra  rb  rd  re  rD   s     r4   rE   !UniSpeechAttnAdapterLayer.forward-  s@    		-0m4M2m4r3   )rd  r`  r_  rb  re  ra  )
r)   r*   r+   r,   r;   r.   r/   rE   r2   rH   rI   s   @r4   r\  r\    s     CU%6%6  r3   r\  c                   t   ^  \ rS rSrU 4S jr  SS\R                  S\\R                     S\4S jjr	Sr
U =r$ )	$UniSpeechEncoderLayerStableLayerNormi7  c                   > [         TU ]  5         [        UR                  UR                  UR
                  SUS9U l        [        R                  " UR                  5      U l
        [        R                  " UR                  UR                  S9U l        [        U5      U l        [        R                  " UR                  UR                  S9U l        [#        USS 5      b  [%        U5      U l        g S U l        g )NFr  r   r^  )r:   r;   r   rY   r  r  r  rW   r   r  r   r   r   r   r  r  r  getattrr\  adapter_layerr   s     r4   r;   -UniSpeechEncoderLayerStableLayerNorm.__init__8  s    +((00,,
 zz&"7"78,,v'9'9v?T?TU08 "V-?-?VEZEZ [6-t4@!:6!BD!%Dr3   r&   r   r   c                    UnU R                  U5      nU R                  XUS9u  pnU R                  U5      nXA-   nXR                  U R	                  U5      5      -   nU R
                  b  XR                  U5      -   nU4nU(       a  Xu4-  nU$ r  )r   r  r   r  r  rm  r!  s           r4   rE   ,UniSpeechEncoderLayerStableLayerNorm.forwardK  s     &6)-L] *8 *
&Q ]3%5%(9(9$:O:OP]:^(__)),>,>},MMM "&Gr3   )rm  r  r   r  r  r   r   )r)   r*   r+   r,   r;   r.   r  r   r  rE   r2   rH   rI   s   @r4   rj  rj  7  sC    &, 26"'	|| !.  	 r3   rj  c                   ~   ^  \ rS rSrU 4S jr    S	S jrS\\R                  S4   S\R                  4S jr	Sr
U =r$ )
UniSpeechEncoderStableLayerNormie  c                   > [         TU ]  5         Xl        [        U5      U l        [
        R                  " UR                  UR                  S9U l	        [
        R                  " UR                  5      U l        [
        R                  " [        UR                  5       Vs/ sH  n[!        U5      PM     sn5      U l        SU l        g s  snf r)  )r:   r;   rh   rK   r*  rW   r   rY   r   r   r   r  r   r   r   r+  rj  r,  r   r-  s      r4   r;   (UniSpeechEncoderStableLayerNorm.__init__f  s    >vF,,v'9'9v?T?TUzz&"7"78mmCHIaIaCbcCba1&9Cbc
 ',# dr/  Nc                    U(       a  SOS nU(       a  SOS nUb4  UR                  S5      R                  SSUR                  S   5      nSX) '   U R                  UU5      nU R	                  U5      n	X-   nU R                  U5      n[        5       =(       d    [        U 5      n
U R                   H  nU(       a  Xa4-   n[        R                  " / 5      nU R                  =(       a    XR                  R                  :  nU(       a  U
(       a  U" XUS9nUS   nU(       a  SnU(       d  M|  UWS   4-   nM     U R                  U5      nU(       a  Xa4-   nU(       d  [        S XU4 5       5      $ [!        UUUS	9$ )
Nr(   r   r   r9   r   r   r3  c              3   ,   #    U H  oc  M  Uv   M     g 7fr~   r(   r5  s     r4   r8  :UniSpeechEncoderStableLayerNorm.forward.<locals>.<genexpr>  r:  r;  r<  )r>  r?  r   r@  r*  r   r
   r   r,  r.   rA  r   rh   rB  r   r1   r   rC  s                  r4   rE   'UniSpeechEncoderStableLayerNorm.forwardq  s    #7BD$5b4%$2$<$<R$@$G$G1mNaNabcNd$e!45M01//

 #11-@%;]302R6LT6R[[E#$58H$H! #(**R.!]]Z/B[[EZEZ/ZN![ !&!Te! !.a 0 ,  &9]1=M<O&O#) !, 6 14D Dm]GZ$[mmm++*
 	
r3   r   rM  c                 r   Ub  U R                   R                  S:X  a  SU;   a  UnU$ S nU$ U R                   R                  S:X  a  [        XR                  5      nU$ U R                   R                  S:X  a+  [	        U[
        R                  5      (       a
  [        USS9nU$ [        XR                  5      nU$ rO  rS  rU  s      r4   r@  1UniSpeechEncoderStableLayerNorm._update_full_mask  rW  r3   rX  rY  )r)   r*   r+   r,   r;   rE   r   r.   r  r@  r2   rH   rI   s   @r4   rr  rr  e  sJ    	, "<
|ellD01 || r3   rr  c                   B   ^  \ rS rSrSrU 4S jr\S 5       rS rSr	U =r
$ )UniSpeechGumbelVectorQuantizeri  z
Vector quantization using gumbel softmax. See `[CATEGORICAL REPARAMETERIZATION WITH
GUMBEL-SOFTMAX](https://huggingface.co/papers/1611.01144) for more information.
c                 8  > [         TU ]  5         UR                  U l        UR                  U l        UR                  U R                  -  S:w  a&  [        SUR                   SU R                   S35      e[        R                  " [        R                  " SU R                  U R
                  -  UR                  U R                  -  5      5      U l        [        R                  " UR                  S   U R                  U R
                  -  5      U l        SU l        g )Nr   z`config.codevector_dim z5 must be divisible by `config.num_codevector_groups` z for concatenationr   r   r9   )r:   r;   num_codevector_groupsr   num_codevectors_per_groupnum_varscodevector_dimr   rW   	Parameterr.   r/   codevectorsr   rt   weight_projtemperaturer   s     r4   r;   'UniSpeechGumbelVectorQuantizer.__init__  s     6688  4??2a7)&*?*?)@ A559__4EEWY  <<a4==!@&BWBW[_[j[jBjk
 99V__R%8$//DMM:YZ r3   c           	          U R                  SS9n[        R                  " [        R                  " U[        R                  " US-   5      -  SS9* 5      R                  5       nU$ )Nr   r   gHz>r   )meanr.   expsumlog)probsmarginal_probs
perplexitys      r4   _compute_perplexity2UniSpeechGumbelVectorQuantizer._compute_perplexity  sR    *YY		.599^VZEZ;[*[ac ddeiik
r3   c                    UR                   u  p#nU R                  U5      nUR                  X#-  U R                  -  S5      nU R                  (       a  [
        R                  R                  UR                  5       U R                  SS9R                  U5      n[        R                  " UR                  X#-  U R                  S5      R                  5       SS9nU R                  U5      nOyUR                  SS9nUR                  " UR                   6 R!                  SUR                  SS5      S5      nUR                  X#-  U R                  S5      nU R                  U5      nUR                  X#-  S5      nUR#                  S5      U R$                  -  n	U	R                  X#-  U R                  U R&                  S5      n
U
R)                  S5      R                  X#S5      n
X4$ )Nr   T)tauhardr   r         ?r   )r   r  r   r   r   rW   r   gumbel_softmaxr   r  type_asr.   r   r  argmax	new_zerosscatter_r>  r  r  r  )r=   r&   
batch_sizesequence_lengthrY   codevector_probscodevector_soft_distr  codevector_idxcodevectors_per_groupr  s              r4   rE   &UniSpeechGumbelVectorQuantizer.forward  s   3@3F3F0
[ ((7%**:+G$//+Y[]^==!}};;##%4+;+;$  <  gm$ 
 $)=="":#?RTU[[]ce$  112FGJ +11b19N,668K8KLUUN''A.   044Z5QSWSbSbdfg112BCJ+001MrR 0 : :2 >AQAQ Q+001Mt`d`m`moqr!oob)..zBO&&r3   )r  r   r  r  r  )r)   r*   r+   r,   r-   r;   staticmethodr  rE   r2   rH   rI   s   @r4   r|  r|    s+    
(  
#' #'r3   r|  c                       \ rS rSr% \\S'   SrSrSrSr	Sr
SrS rS\\R                  \4   4S jrS	\S
\R                  4S jrSrg)UniSpeechPreTrainedModeli  rh   	unispeechr   Tc           
         [        U[        5      (       a  UR                  R                  R                  R                  SSS9  UR                  R                  R                  R                  5         [        R                  R                  UR                  5        g[        U[        5      (       a  [        R                  R                  UR                  R                  SS[        R                  " SUR                  R                   S   UR                  R"                  -  -  5      -  S9  [        R                  R%                  UR                  R                  S5        g[        U[&        5      (       a  [        R                  " SUR(                  R*                  -  5      n[        R                  R                  UR(                  R                  U* US9  [        R                  R                  UR(                  R                  U* US9  g[        U[        R,                  5      (       ak  UR                  R                  R                  SU R.                  R0                  S9  UR                  b%  UR                  R                  R                  5         gg[        U[        R2                  [        R4                  45      (       aJ  UR                  R                  R                  5         UR                  R                  R7                  S5        g[        U[        R8                  5      (       a  [        R                  R;                  UR                  5        UR                  bh  [        R                  " UR<                  UR"                  UR                   S   -  -  5      n[        R                  R                  UR                  U* US9  ggg)	zInitialize the weightsr   r   )r  stdr   r9   )abNr  )r
  r|  r  rS   datanormal_rs   zero_rW   inituniform_r  rK   r[   mathsqrtrN   in_channels	constant_r   r   in_featuresr   rh   initializer_ranger   r   fill_rX   kaiming_normal_rP   )r=   r   ks      r4   _init_weights&UniSpeechPreTrainedModel._init_weights  s    f<==%%**222C##((..0GGV//0 @AAGGOO""		!v{{'>'>q'AFKKD[D['["\]]  
 GGfkk..2 :;;		!f//;;;<AGGV..55!qAGGV..33rQ?		**MM&&CT[[5R5R&S{{&  &&( 'r|| <==KK""$MM$$S)		**GG##FMM2{{&IIfmmv/A/AFDVDVWXDY/YZ[  a 8 ' +r3   input_lengthsc                     S n[        U R                  R                  U R                  R                  5       H  u  p4U" XU5      nM     U$ )z8
Computes the output length of the convolutional layers
c                 8    [         R                  " X-
  USS9S-   $ )Nfloor)rounding_moder   )r.   div)input_lengthrN   rr   s      r4   _conv_out_lengthSUniSpeechPreTrainedModel._get_feat_extract_output_lengths.<locals>._conv_out_length<  s      99\7wWZ[[[r3   )ziprh   rw   rx   )r=   r  r  rN   rr   s        r4    _get_feat_extract_output_lengths9UniSpeechPreTrainedModel._get_feat_extract_output_lengths7  sG    
	\
 $'t{{'>'>@W@W#XK,]PM $Y r3   feature_vector_lengthr   c                    UR                  SS9S S 2S4   nU R                  U5      R                  [        R                  5      nUR
                  S   n[        R                  " XQ4UR                  UR                  S9nSU[        R                  " UR
                  S   UR                  S9US-
  4'   UR                  S/5      R                  S5      R                  S/5      R                  5       nU$ )Nr   r   r   )rT  devicer   )r  )cumsumr  tor.   longr   zerosrT  r  arangeflipr  )r=   r  r   non_padded_lengthsoutput_lengthsr  s         r4   "_get_feature_vector_attention_mask;UniSpeechPreTrainedModel._get_feature_vector_attention_maskF  s     ,22r2:1b5A>>?QRUUV[V`V`a#))!,
/~7K7KTbTiTi
 uv^%9%9!%<^EZEZ[]kno]opq',,bT299"=BBB4HMMOr3   r(   N)r)   r*   r+   r,   r   r0   base_model_prefixmain_input_namesupports_gradient_checkpointing_supports_flash_attn_supports_sdpa_supports_flex_attnr  r   r.   
LongTensorr   r  r  r2   r(   r3   r4   r  r    sg    #$O&*#N9BeEDTDTVYDY>Z  ]b]m]m r3   r  r   	mask_probmask_length	min_masksr   c           	        ^^^^^ U u  nmTS:  a  [        S5      eTT:  a  [        ST ST S35      e[        R                  R                  S5      R	                  5       mUUUUU4S jnUb-  UR                  5       R                  S5      R                  5       O[        U5       Vs/ sH  nTPM     snn[        R                  " UT4[        S	9n	/ n
U" T5      nUS
:X  a  U	$ U H  nU" U5      n[        R                  R                  [        R                  " UTS-
  -
  5      USS9n[        U5      S
:X  a  TS-
  nOUS
   n[        R                  " U[        R                  " X-
  [        R                   S	9U-  /5      nU
R#                  U5        M     [        R$                  " U
5      n
[        R&                  " U
SS2SS2S4   X[T45      n
U
R)                  X[T-  5      n
[        R                  " T5      SSSS24   n[        R&                  " UX[T45      R)                  X[T-  5      nU
U-   n
U
R+                  5       TS-
  :  a  TS-
  XTS-
  :  '   [        R,                  " XSS5        U	$ s  snf )a2  
Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
ASR](https://huggingface.co/papers/1904.08779). Note that this method is not optimized to run on TPU and should be run on
CPU as part of the preprocessing during training.

Args:
    shape: The shape for which to compute masks. This should be of a tuple of size 2 where
           the first element is the batch size and the second element is the length of the axis to span.
    mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                independently generated mask spans of length `mask_length` is computed by
                `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                actual percentage will be smaller.
    mask_length: size of the mask
    min_masks: minimum number of masked spans
    attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                    each batch dimension.
r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                    > [        TU -  T-  T-   5      n[        UT5      nUT-  T:  a  TT-  nU TS-
  -
  U:  a  [        U TS-
  -
  S5      nU$ )z;Given input length, compute how many spans should be maskedr   r   )r   max)r  num_masked_spanepsilonr  r  r  r  s     r4   compute_num_masked_span6_compute_mask_indices.<locals>.compute_num_masked_span|  so    i,6DwNOoy9 [(?:-<O ;?+o=!,+/"BAFOr3   Nr   rT  r   F)replace)r   nprandomrA  itemdetachr  tolistr   r  r  choicer  lenconcatenateonesint32appendarraybroadcast_tor   r  put_along_axis)r   r  r  r   r  r  r  r#  r  spec_aug_maskspec_aug_mask_idxsmax_num_masked_spanr  r  spec_aug_mask_idxdummy_mask_idxoffsetsr  r  s    `` `            @@r4   _compute_mask_indicesr  V  s   0 #(JQABB_$]^i]j&&7q:
 	
 iinnQ$$&G $ % 	##B'..0',Z'89'8!o'89  HHj/:$GM1/Ba%1,? II,,IIlkAo67RW - 
  !Q& -q0N.q1NNN(;(MUWU]U] ^ao op
 	!!"34/ &2 "45 1a:&+(V ,33JVa@ab ii$T4]3Goog
'UV^^+5G ,g5 /A"55GVYZGZ!0CCD mB?w :s   (I/c                   >  ^  \ rS rSrS\4U 4S jjr  SS\R                  S\\R                     S\\R                     4S jjr
\     SS\\R                     S\\R                     S\\R                     S	\\   S
\\   S\\   S\\\4   4S jj5       rSrU =r$ )UniSpeechModeli  rh   c                   > [         TU ]  U5        Xl        [        U5      U l        [        U5      U l        UR                  S:  d  UR                  S:  aG  [        R                  " [        R                  " UR                  5      R                  5       5      U l        UR                   (       a  [#        U5      U l        O['        U5      U l        U R)                  5         g )Nr   )r:   r;   rh   r   feature_extractorr   feature_projectionmask_time_probmask_feature_probrW   r  r.   r  rY   r  masked_spec_embeddo_stable_layer_normrr  encoderr'  	post_initr   s     r4   r;   UniSpeechModel.__init__  s     !8!@"<V"D  3&&*B*BS*H%'\\%,,v?Q?Q2R2[2[2]%^D"&&:6BDL+F3DL 	r3   r&   mask_time_indicesr   c                    [        U R                  SS5      (       d  U$ UR                  5       u  pEnUb(  U R                  R	                  UR
                  5      X'   OU R                  R                  S:  a  U R                  (       a  [        XE4U R                  R                  U R                  R                  UU R                  R                  S9n[        R                  " X!R                  [        R                  S9nU R                  R	                  UR
                  5      X'   U R                  R                  S:  a  U R                  (       a  [        XF4U R                  R                  U R                  R                   U R                  R"                  S9n[        R                  " XqR                  [        R                  S9nUSS2S4   R%                  SUS5      nSX'   U$ )	z
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
apply_spec_augmentTNr   )r  r  r   r  )r  rT  )r  r  r  r   )rl  rh   r   r  r  rT  r   r   r  mask_time_lengthmask_time_min_masksr.   rZ  r  r  r  mask_feature_lengthmask_feature_min_masksexpand)r=   r&   r  r   r  r  rY   mask_feature_indicess           r4   _mask_hidden_states"UniSpeechModel._mask_hidden_states  s    t{{$8$??   4A3E3E3G0
[(/3/E/E/H/HI\I\/]M,[[''!+ 5-++44 KK88-++99! !&->G[G[chcmcm n/3/E/E/H/HI\I\/]M,;;((1,#8)++77 KK;;++<<	$  $)<<0DMaMainisis#t #74#@#G#GO]_#` 23M/r3   r   r   r0  r1  r   c                    Ub  UOU R                   R                  nUb  UOU R                   R                  nUb  UOU R                   R                  nU R	                  U5      nUR                  SS5      nUb  U R                  UR                  S   U5      nU R                  U5      u  pU R                  XUS9nU R                  UUUUUS9n	U	S   nU(       d	  X4U	SS -   $ [        UUU	R                  U	R                  S9$ )a  
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
    Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
    masked extracted features in *config.proj_codevector_dim* space.
Nr   r9   )r  r   r   r   r0  r1  r   )r=  extract_featuresr&   r'   )rh   r   r0  use_return_dictr  rk   r  r   r  r  r  UniSpeechBaseModelOutputr&   r'   )
r=   r   r   r  r   r0  r1  r  r&   encoder_outputss
             r4   rE   UniSpeechModel.forward  s7    2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B]11,?+55a;%!DDEUE[E[\]E^`noN*.*A*ABR*S'00~ 1 
 ,,)/!5# ' 
 (*!4qr7JJJ'+-)77&11	
 	
r3   )rh   r  r  r  r  r3  NNNNN)r)   r*   r+   r,   r   r;   r.   r/   r   r  r  r   r  r  r   r1   r  rE   r2   rH   rI   s   @r4   r  r    s     ( :>59	,((, $E$5$56, !!1!12	,\  269=,0/3&*2
u||,2
 !.2
 $E$5$56	2

 $D>2
 'tn2
 d^2
 
u..	/2
 2
r3   r  zZ
    UniSpeech Model with a vector-quantization module and ctc loss for pre-training.
    c                   8  ^  \ rS rSrS\4U 4S jjrS\4S jrS rS r	\
 SS\R                  S	\R                  S
\R                  S\4S jj5       r\    SS\\R                      S\\R                      S\\   S\\   S\\   S\\\4   4S jj5       rSrU =r$ )UniSpeechForPreTrainingiG  rh   c                 8  > [         TU ]  U5        [        U5      U l        [        R
                  " UR                  5      U l        [        U5      U l	        [        R                  " UR                  UR                  5      U l        [        R                  " UR                  UR                  5      U l        [        R                  " UR                  UR                   5      U l        [        R
                  " UR$                  5      U l        U R)                  5         g r~   )r:   r;   r  r  rW   r   feat_quantizer_dropoutdropout_featuresr|  	quantizerr   r  proj_codevector_dim	project_qrY   project_hidnum_ctc_classesctc_projfinal_dropoutr   r  r   s     r4   r;    UniSpeechForPreTraining.__init__M  s     '/ "

6+H+H I7?6#8#8&:T:TU99V%?%?ASAST		&"4"4f6L6LMzz&"6"67 	r3   r  c                 $    XR                   l        g)zR
Set the Gumbel softmax temperature to a given value. Only necessary for training
N)r  r  )r=   r  s     r4   set_gumbel_temperature.UniSpeechForPreTraining.set_gumbel_temperature\  s     &1"r3   c                 Z    [         R                  " S[        5        U R                  5         gz
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. Please use the equivalent `freeze_feature_encoder` method instead.NwarningswarnFutureWarningfreeze_feature_encoderr=   s    r4   freeze_feature_extractor0UniSpeechForPreTraining.freeze_feature_extractorb  '    
 	Q	

 	##%r3   c                 L    U R                   R                  R                  5         g
Calling this function will disable the gradient computation for the feature encoder so that its parameter will
not be updated during training.
Nr  r  r   r2  s    r4   r1  .UniSpeechForPreTraining.freeze_feature_encodern      
 	((;;=r3   target_featuresnegative_featurespredicted_featuresc                     [         R                  " X/SS9n [         R                  " UR                  5       U R                  5       SS9nUR	                  U 5      nXC-  nU$ )z
Compute logits for contrastive loss based using cosine similarity as the distance measure between
`[positive_feature, negative_features]` and `[predicted_features]`. Additionally, temperature can be applied.
r   r   r   )r.   catcosine_similarityr   r  )r<  r=  r>  r  logitss        r4   compute_contrastive_logits2UniSpeechForPreTraining.compute_contrastive_logitsu  s\      ))_$HaP(();)A)A)C_EZEZE\bde0 %r3   r   r   r   r0  r1  r   c           	         Ub  UOU R                   R                  nU R                  UUUUUS9nUS   nU R                  US   5      nU R	                  U5      u  pU R                  U	R                  U R
                  R                  R                  5      5      n	U R                  U	5      n	[        R                  " UR                  S5      UR                  S5      5      R                  U R                   R                  5      nUR                  SS5      n[        R                   " U5      R#                  5       R                  UR$                  5      nUR                  SS5      nUR'                  S5      nUR)                  US5      U	R)                  U) S5      -   nU R+                  U5      nU R-                  U5      nSnU(       d  Ub
  XX4USS -   $ XyU
4USS -   $ [/        UUU	U
UR0                  UR2                  S9$ )	a[  
Example:

```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> # TODO: Add full pretraining example
```Nr  r   r   r   r   r9   )r"   r#   r$   r%   r&   r'   )rh   r  r  r  r  r!  r  rS   rT  r"  r.   emptyr   r  replace_probrk   	bernoullir  r  r>  masked_fillr   r$  r    r&   r'   )r=   r   r   r   r0  r1  r$  transformer_featuresr  quantized_featuresr%   prob_replace_matrixsampled_replace_matrixrB  r"   s                  r4   rE   UniSpeechForPreTraining.forward  s   * &1%<k$++B]B]..)/!5# ! 
  'qz  00<48NNCS4T1 "^^,>,A,A$..BWBWB]B],^_!--.@A#kk*>*C*CA*FH\HaHabcHdekkKK$$
 2;;AqA!&1D!E!J!J!L!O!OPdPkPk!l!7!A!A!Q!G!7!A!A"!E%112H#N**,B+BCH

 f%v& 4F^ahijikalll(>STW^_`_aWbbb,1'9"7!//))
 	
r3   )r$  r   r  r"  r!  r  r  )r   )NNNN)r)   r*   r+   r,   r   r;   r   r(  r3  r1  r  r.   r/   rC  r   r   r  r  r   r1   r    rE   r2   rH   rI   s   @r4   r  r  G  s    1# 1
&> 
 	** ,, "-- 	 &  26,0/3&*D
u||,D
 !.D
 $D>	D

 'tnD
 d^D
 
u33	4D
 D
r3   r  r9   zq
    UniSpeech Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).
    c                      ^  \ rS rSrSS\\   4U 4S jjjrS rS rS r	S r
\     SS\\R                     S	\\R                     S
\\   S\\   S\\   S\\R                     S\\\4   4S jj5       rSrU =r$ )UniSpeechForCTCi  target_langc                   > [         TU ]  U5        [        U5      U l        [        R
                  " UR                  5      U l        X l        UR                  c  [        SU R                   S35      e[        US5      (       a  UR                  (       a  UR                  OUR                  n[        R                   " X1R                  5      U l        U R%                  5         g)a  
target_lang (`str`, *optional*):
    Language id of adapter weights. Adapter weights are stored in the format adapter.<lang>.safetensors or
    adapter.<lang>.bin. Only relevant when using an instance of [`UniSpeechForCTC`] with adapters. Uses 'eng' by
    default.
NzYou are trying to instantiate z with a configuration that does not define the vocabulary size of the language model head. Please instantiate the model as follows: `UniSpeechForCTC.from_pretrained(..., vocab_size=vocab_size)`. or define `vocab_size` of your model's configuration.add_adapter)r:   r;   r  r  rW   r   r%  r   rQ  
vocab_sizer   r?   r]   rS  output_hidden_sizerY   r   lm_headr  )r=   rh   rQ  rU  r?   s       r4   r;   UniSpeechForCTC.__init__  s     	 '/zz&"6"67&$00@ AH H  *1)G)GFL^L^F%%djdvdv 	 yy!35F5FG 	r3   c                     U R                   nUb'  [        U R                  SS5      c  [        SU S35      eUc.  [        U R                  SS5      b  [        R                  S5        gUb  U R                  USS9  gg)a  
    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """
        # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed
        # to correctly load adapter layers for UniSpeech so that we do not have to introduce a new API to
        # [`PreTrainedModel`]. While slightly hacky, UniSpeech never has to tie input and output embeddings, so it is
        # ok to repurpose this function here.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and labels.max() >= self.config.vocab_size:
            raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
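

# Usage sketch (illustrative): greedy CTC decoding with the class above. The checkpoint
# id is an assumption; any UniSpeech checkpoint fine-tuned for CTC with a matching
# processor works the same way.
#
#     import torch
#     from transformers import AutoProcessor, UniSpeechForCTC
#
#     processor = AutoProcessor.from_pretrained("microsoft/unispeech-1350-en-353-fr-ft-1h")
#     model = UniSpeechForCTC.from_pretrained("microsoft/unispeech-1350-en-353-fr-ft-1h")
#
#     speech = [0.0] * 16_000  # stand-in for one second of 16 kHz mono audio
#     inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
#
#     with torch.no_grad():
#         logits = model(**inputs).logits  # (batch, time, vocab_size)
#     predicted_ids = logits.argmax(dim=-1)  # greedy path over the vocabulary
#     transcription = processor.batch_decode(predicted_ids)

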
@auto_docstring(
    custom_intro="""
    UniSpeech Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """
)
class UniSpeechForSequenceClassification(UniSpeechPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of UniSpeech adapters (config.add_adapter=True)"
            )
        self.unispeech = UniSpeechModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.unispeech.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.unispeech.parameters():
            param.requires_grad = False

    @auto_docstring
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[tuple, SequenceClassifierOutput]:
        r"""
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `list[float]`, a `numpy.ndarray` or a `torch.Tensor`, *e.g.* via the torchcodec
            library (`pip install torchcodec`) or the soundfile library (`pip install soundfile`).
            To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion
            into a tensor of type `torch.FloatTensor`. See [`UniSpeechProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.unispeech(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            pooled_output = hidden_states.mean(dim=1)
        else:
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            expand_padding_mask = padding_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "UniSpeechForCTC",
    "UniSpeechForPreTraining",
    "UniSpeechForSequenceClassification",
    "UniSpeechModel",
    "UniSpeechPreTrainedModel",
]