"""PyTorch Persimmon model."""

from typing import Callable, Optional, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
    GenericForSequenceClassification,
    GenericForTokenClassification,
    GradientCheckpointingLayer,
)
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from .configuration_persimmon import PersimmonConfig


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import BlockMask

    from ...integrations.flex_attention import make_flex_block_causal_mask


logger = logging.get_logger(__name__)


class PersimmonRotaryEmbedding(nn.Module):
    def __init__(self, config: PersimmonConfig, device=None):
        super().__init__()
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
        else:
            self.rope_type = "default"
        self.max_seq_len_cached = config.max_position_embeddings
        self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    @torch.no_grad()
    @dynamic_rope_update
    def forward(self, x, position_ids):
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
        position_ids_expanded = position_ids[:, None, :].float()

        device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):  # force float32 for the rotary frequencies
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos() * self.attention_scaling
            sin = emb.sin() * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class PersimmonMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)
        self.act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states):
        hidden_states = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.dense_4h_to_h(hidden_states)
        return hidden_states


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class PersimmonAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: PersimmonConfig, layer_idx: Optional[int] = None):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        if layer_idx is None:
            logger.warning_once(
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
                "lead to errors during the forward call if caching is used. Please make sure to provide a "
                "`layer_idx` when creating this class."
            )

        self.hidden_size = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.hidden_size // self.num_heads
        self.rope_theta = config.rope_theta
        self.rotary_ndims = int(self.head_dim * config.partial_rotary_factor)
        self.is_causal = True

        if (self.head_dim * self.num_heads) != self.hidden_size:
            raise ValueError(
                f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
                f" and `num_heads`: {self.num_heads})."
            )
        self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
        self.dense = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=True)
        self.qk_layernorm = config.qk_layernorm
        self.scaling = self.head_dim**-0.5

        if self.qk_layernorm:
            self.q_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
            self.k_layernorm = nn.LayerNorm(
                config.hidden_size // self.num_heads, eps=config.layer_norm_eps, elementwise_affine=True
            )
        self.attention_dropout = nn.Dropout(config.attention_dropout)
        self.rotary_emb = PersimmonRotaryEmbedding(config=self.config)

    def _split_heads(self, fused_qkv: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
        storage as `fused_qkv`

        Args:
            fused_qkv (`torch.tensor`): [batch_size, seq_length, num_heads * 3 * head_dim]

        Returns:
            query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
            value: [batch_size, seq_length, num_heads, head_dim]
        """
        batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
        fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
        return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # [batch_size, seq_length, 3 x hidden_size]
        fused_qkv = self.query_key_value(hidden_states)

        # 3 x [batch_size, seq_length, num_heads, head_dim]
        (query_states, key_states, value_states) = self._split_heads(fused_qkv)

        if self.qk_layernorm:
            query_states = self.q_layernorm(query_states)
            key_states = self.k_layernorm(key_states)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        cos, sin = position_embeddings

        # Partial rotary embedding: only the first `rotary_ndims` dims are rotated
        query_rot, query_pass = (
            query_states[..., : self.rotary_ndims],
            query_states[..., self.rotary_ndims :],
        )
        key_rot, key_pass = (
            key_states[..., : self.rotary_ndims],
            key_states[..., self.rotary_ndims :],
        )
        query_rot, key_rot = apply_rotary_pos_emb(query_rot, key_rot, cos, sin)

        # [batch_size, num_heads, seq_length, head_dim]
        query_states = torch.cat((query_rot, query_pass), dim=-1)
        key_states = torch.cat((key_rot, key_pass), dim=-1)

        if past_key_value is not None:
            # Specific to RoPE models with partial rotation
            cache_kwargs = {
                "sin": sin,
                "cos": cos,
                "partial_rotation_size": self.rotary_ndims,
                "cache_position": cache_position,
            }
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.config.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.dense(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class PersimmonDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: PersimmonConfig, layer_idx: int):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = PersimmonAttention(config=config, layer_idx=layer_idx)
        self.mlp = PersimmonMLP(config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
                Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
                `[0, config.n_positions - 1]`.
                [What are position IDs?](../glossary#position-ids)
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*):
                cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence
            position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
                with `head_dim` being the embedding dimension of each attention head.
        """

        residual = hidden_states

        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = hidden_states + residual

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


@auto_docstring
class PersimmonPreTrainedModel(PreTrainedModel):
    config: PersimmonConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["PersimmonDecoderLayer"]
    _skip_keys_device_placement = "past_key_values"
    _can_compile_fullgraph = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.weight.data.fill_(1.0)
            module.bias.data.zero_()


@auto_docstring
class PersimmonModel(PersimmonPreTrainedModel):
    """
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`PersimmonDecoderLayer`]

    Args:
        config: PersimmonConfig
    """

    def __init__(self, config: PersimmonConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [PersimmonDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.final_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.rotary_emb = PersimmonRotaryEmbedding(config=config)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
    ) -> BaseModelOutputWithPast:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if self.gradient_checkpointing and self.training and use_cache:
            logger.warning_once(
                "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
            )
            use_cache = False

        if not isinstance(past_key_values, (type(None), Cache)):
            raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache()

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        causal_mask = self._update_causal_mask(
            attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
        )

        hidden_states = inputs_embeds

        # create position embeddings to be shared across the decoder layers
        position_embeddings = self.rotary_emb(hidden_states, position_ids)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **flash_attn_kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        hidden_states = self.final_layernorm(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def _update_causal_mask(
        self,
        attention_mask: Union[torch.Tensor, "BlockMask"],
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool = False,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and (attention_mask == 0.0).any():
                return attention_mask
            return None
        if self.config._attn_implementation == "flex_attention":
            if isinstance(attention_mask, torch.Tensor):
                attention_mask = make_flex_block_causal_mask(attention_mask)
            return attention_mask

        # For SDPA, when possible, rely on its `is_causal` argument instead of its `attn_mask` argument in order
        # to dispatch on Flash Attention 2. This feature is not compatible with static cache.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_compilable_cache = past_key_values.is_compileable if past_key_values is not None else False

        if self.config._attn_implementation == "sdpa" and not using_compilable_cache and not output_attentions:
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                is_training=self.training,
            ):
                return None

        dtype = input_tensor.dtype
        sequence_length = input_tensor.shape[1]
        if using_compilable_cache:
            target_length = past_key_values.get_max_cache_shape()
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case a 2D `attention_mask` is provided, generate the causal 4D mask here.
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu", "npu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows (e.g. left padding), as required by the
            # memory-efficient path of F.scaled_dot_product_attention.
            min_dtype = torch.finfo(dtype).min
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        cache_position: torch.Tensor,
        batch_size: int,
        **kwargs,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
            )
            if sequence_length != 1:
                causal_mask = torch.triu(causal_mask, diagonal=1)
            causal_mask *= torch.arange(target_length, device=cache_position.device) > cache_position.reshape(-1, 1)
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )

        return causal_mask


class PersimmonForCausalLM(PersimmonPreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.model = PersimmonModel(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs,
    ) -> CausalLMOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> from transformers import AutoTokenizer, PersimmonForCausalLM

        >>> model = PersimmonForCausalLM.from_pretrained("adept/persimmon-8b-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("adept/persimmon-8b-base")

        >>> prompt = "human: Hey, what should I eat for dinner?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        'human: Hey, what should I eat for dinner?\n\ncat: 🐱\n\nhuman: 😐\n\n'
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPast = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            cache_position=cache_position,
        )

        hidden_states = outputs.last_hidden_state
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


class PersimmonForSequenceClassification(GenericForSequenceClassification, PersimmonPreTrainedModel):
    pass


class PersimmonForTokenClassification(GenericForTokenClassification, PersimmonPreTrainedModel):
    pass


__all__ = [
    "PersimmonForCausalLM",
    "PersimmonModel",
    "PersimmonPreTrainedModel",
    "PersimmonForSequenceClassification",
    "PersimmonForTokenClassification",
]