from typing import Optional

import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)

def sdpa_attention_paged_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    is_causal: Optional[bool] = None,
    **kwargs,
) -> tuple[torch.Tensor, None]:
    # If a paged cache was passed, write the new key/value states into it and
    # read back the full states for this layer.
    cache = kwargs.pop("cache", None)
    if cache is not None:
        key, value = cache.update(key, value, module.layer_idx, **kwargs)
    # Expand grouped key/value heads so they match the number of query heads.
    if hasattr(module, "num_key_value_groups"):
        key = repeat_kv(key, module.num_key_value_groups)
        value = repeat_kv(value, module.num_key_value_groups)

    causal_mask = attention_mask
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()

    # is_causal is hard-coded to False here: any causal structure is expected
    # to be encoded in the attention_mask built for the paged layout.
    attn_output = torch.nn.functional.scaled_dot_product_attention(
        query,
        key,
        value,
        attn_mask=causal_mask,
        dropout_p=dropout,
        scale=scaling,
        is_causal=False,
    )
    # (batch, num_heads, seqlen, head_dim) -> (batch, seqlen, num_heads, head_dim)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, None
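
# --- Illustrative usage sketch (not part of the library) --------------------
# A minimal smoke test, assuming no paged cache is passed and a hypothetical
# module stub that carries only the one attribute the forward inspects
# (num_key_value_groups). Real callers pass an attention module and a paged
# cache via the `cache` kwarg.
if __name__ == "__main__":
    batch, num_kv_heads, num_heads, seqlen, head_dim = 1, 2, 8, 4, 16

    # repeat_kv: 2 KV heads expanded to match 8 query heads (n_rep = 4).
    kv = torch.randn(batch, num_kv_heads, seqlen, head_dim)
    assert repeat_kv(kv, num_heads // num_kv_heads).shape == (batch, num_heads, seqlen, head_dim)

    class DummyAttention(torch.nn.Module):
        # Hypothetical stand-in: only the attribute read by the forward is set.
        num_key_value_groups = num_heads // num_kv_heads

    query = torch.randn(batch, num_heads, seqlen, head_dim)
    attn_output, _ = sdpa_attention_paged_forward(DummyAttention(), query, kv, kv, attention_mask=None)
    print(attn_output.shape)  # torch.Size([1, 4, 8, 16]) after the final transpose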