"""
Partially inspired by torchtune's flex attention implementation

Citation:
@software{torchtune,
  title = {torchtune: PyTorch's finetuning library},
  author = {torchtune maintainers and contributors},
  url = {https://github.com/pytorch/torchtune},
  license = {BSD-3-Clause},
  month = apr,
  year = {2024}
}
"""

from typing import Optional, Union

import torch
from packaging import version

from ..utils import is_torch_flex_attn_available, logging
from ..utils.import_utils import _torch_version, is_torch_less_or_equal, is_torchdynamo_compiling


if is_torch_flex_attn_available():
    from torch.nn.attention.flex_attention import _DEFAULT_SPARSE_BLOCK_SIZE as flex_default_block_size
    from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention


logger = logging.get_logger(__name__)


class WrappedFlexAttention:
    """
    We are doing a singleton class so that flex attention is compiled once when it's first called.
    """

    _instance = None
    _is_flex_compiled = False
    _compiled_flex_attention = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # Create a new instance only if one does not already exist
            cls._instance = super().__new__(cls)
        return cls._instance

    @torch.compiler.disable(recursive=False)
    def __init__(self, training):
        """
        Initialize or update the singleton instance.
        """
        if not self._is_flex_compiled or training != self.training:
            self.training = training
            if is_torch_less_or_equal("2.5.1"):
                self._compiled_flex_attention = torch.compile(flex_attention, dynamic=False)
            # PyTorch 2.6.0 has issues compiling flex attention for training, hence the
            # "max-autotune-no-cudagraphs" mode in that case
            elif version.parse(_torch_version).base_version == "2.6.0" and training:
                self._compiled_flex_attention = torch.compile(
                    flex_attention, dynamic=False, mode="max-autotune-no-cudagraphs"
                )
            else:
                self._compiled_flex_attention = torch.compile(flex_attention)
            self._is_flex_compiled = True

    def __call__(self):
        return self._compiled_flex_attention


def compile_friendly_flex_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    training=False,
    **kwargs,
) -> Union[torch.Tensor, tuple[torch.Tensor, torch.Tensor]]:
    # Do not use the compiled version if we are already compiling the forward pass
    # (nested compilation raises issues); fall back to the plain flex_attention kernel instead
    flex_attention_compiled = WrappedFlexAttention(training)() if not is_torchdynamo_compiling() else flex_attention
    return flex_attention_compiled(
        query,
        key,
        value,
        **kwargs,
    )


Offset = Union[torch.Tensor, int]


def make_flex_block_causal_mask(
    attention_mask_2d: torch.Tensor,
    attention_chunk_size: Optional[int] = None,
    query_length=None,
    key_length=None,
    offsets: Optional[tuple[Offset, Offset]] = None,
    is_causal: Optional[bool] = True,
) -> "BlockMask":
    """
    IMPORTANT NOTICE: This function is deprecated in favor of using the mask primitives in `masking_utils.py`,
    and will be removed in a future version without warnings. New code should not use it. It is only kept here
    for BC for now, while models using it are being patched accordingly.

    Create a block (causal) document mask for a batch of sequences, both packed and unpacked.
    The block (causal) logic is created and passed into :func:`torch.nn.attention.flex_attention.create_block_mask`.
    The resultant BlockMask is a compressed representation of the full (causal) block
    mask. BlockMask is essential for performant computation of flex attention.
    See: https://pytorch.org/blog/flexattention/

    Args:
        attention_mask_2d (torch.Tensor): Attention mask for packed and padded sequences
        of shape (batch_size, total_seq_len). e.g.

        For unpacked sequence:
        [[1, 1, 1, 1, 0, 0, 0],
         [1, 1, 1, 1, 1, 0, 0]]

        For packed sequence:
        [[1, 1, 1, 2, 2, 2, 0],
         [1, 1, 2, 2, 2, 3, 3]]

    Returns:
        BlockMask
    """
    batch_size, total_seq_len = attention_mask_2d.shape
    if not key_length:
        key_length = total_seq_len
    if not query_length:
        query_length = total_seq_len
    # Pad the key length up to the next multiple of the default sparse block size, as older
    # torch versions cannot handle arbitrary mask shapes
    pad_len = ((key_length // flex_default_block_size) + 1) * flex_default_block_size
    attention_mask_2d = torch.nn.functional.pad(attention_mask_2d, value=0, pad=[0, pad_len - key_length])
    device = attention_mask_2d.device
    document_ids = attention_mask_2d.clone()

    if attention_chunk_size is not None:
        # Build per-token chunk indices, e.g. [0, 0, 0, 1, 1, 1, ...], by cumulating ones and
        # floor-dividing by the chunk size
        chunk_idxs = (document_ids.clone().fill_(1).cumsum(-1) - 1) // attention_chunk_size

    # Instead of passing a tensor mask, flex attention requires a mask_mod function that determines
    # which elements of QK^T should be included in the attention computation prior to the softmax.
    # For sample packing we need both the causal mask and the document mask, see
    # https://pytorch.org/blog/flexattention/#documentmaskingjagged-sequences
    def causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Defines the logic of a block causal mask by combining both a standard causal mask
        and a block diagonal document mask.
        See :func:`~torchtune.modules.attention_utils.create_block_causal_mask`
        for an illustration.
        """
        causal_mask = q_idx >= kv_idx
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        padding_mask = attention_mask_2d[batch_idx, q_idx] > 0
        final_mask = causal_mask & padding_mask & document_mask
        return final_mask

    def chunk_causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Combines the chunk mask with the causal mask for chunked attention.
        """
        chunk_mask = chunk_idxs[batch_idx, q_idx] == chunk_idxs[batch_idx, kv_idx]
        causal_doc_mask = causal_mask_mod(batch_idx, head_idx, q_idx, kv_idx)
        return chunk_mask & causal_doc_mask

    def default_mask_mod(batch_idx, head_idx, q_idx, kv_idx):
        """
        Utilizes default attention mask to enable encoder and encoder-decoder
        attention masks.
        """
        document_mask = document_ids[batch_idx, q_idx] == document_ids[batch_idx, kv_idx]
        padding_mask = attention_mask_2d[batch_idx, kv_idx] > 0
        final_mask = padding_mask & document_mask
        return final_mask

    if not is_causal:
        mask_mod_maybe_combined = default_mask_mod
    else:
        mask_mod_maybe_combined = causal_mask_mod if attention_chunk_size is None else chunk_causal_mask_mod

    if offsets is not None:
        q_offset = offsets[0].to(device)
        kv_offset = offsets[1].to(device)

        def mask_mod(batch_idx, head_idx, q_idx, kv_idx):
            offset_q = q_idx + q_offset
            offset_kv = kv_idx + kv_offset
            return mask_mod_maybe_combined(batch_idx, head_idx, offset_q, offset_kv)

    else:
        mask_mod = mask_mod_maybe_combined

    return create_block_mask(
        mask_mod=mask_mod,
        B=batch_size,
        H=None,  # attention head
        Q_LEN=query_length,
        KV_LEN=key_length,
        device=device,
        _compile=True,
    )


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def flex_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Union[torch.Tensor, "BlockMask"],
    scaling: Optional[float] = None,
    softcap: Optional[float] = None,
    head_mask: Optional[torch.Tensor] = None,
    s_aux: Optional[torch.Tensor] = None,
    **kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
    if head_mask is not None:
        logger.warning_once(
            "`flex_attention` does not support `head_mask`. Please set your attention to `eager` if you want this feature."
        )

    if kwargs.get("dropout", 0.0) > 0:
        raise ValueError(
            "`flex_attention` does not support `dropout`. Please use it with inference only (`model.eval()`) or turn off "
            "the attention dropout in the respective config."
        )

    block_mask = None
    score_mask = None
    if isinstance(attention_mask, BlockMask):
        block_mask = attention_mask
    elif attention_mask is not None:
        score_mask = attention_mask[:, :, :, : key.shape[-2]]

    def score_mod(score, batch_idx, head_idx, q_idx, kv_idx):
        if softcap is not None:
            score = softcap * torch.tanh(score / softcap)
        if score_mask is not None:
            score = score + score_mask[batch_idx][0][q_idx][kv_idx]
        if head_mask is not None:
            score = score + head_mask[batch_idx][head_idx][0][0]
        if s_aux is not None:
            # Attention-sink style renormalization: the auxiliary logits take part in the softmax normalizer
            logits_max = torch.max(score, dim=-1, keepdim=True).values
            sinks = torch.exp(s_aux - logits_max)
            unnormalized_scores = torch.exp(score - logits_max)
            normalizer = unnormalized_scores.sum(dim=-1, keepdim=True) + sinks
            score = unnormalized_scores / normalizer
        return score

    enable_gqa = True
    num_local_query_heads = query.shape[1]

    # If the local number of query heads is not a power of two (e.g. when running tensor parallel),
    # skip the GQA fast path and repeat the key/value heads manually instead
    if not ((num_local_query_heads & (num_local_query_heads - 1)) == 0):
        key = repeat_kv(key, query.shape[1] // key.shape[1])
        value = repeat_kv(value, query.shape[1] // value.shape[1])
        enable_gqa = False

    kernel_options = kwargs.get("kernel_options", None)
    # The log-sum-exp is only returned on non-CPU devices
    return_lse = query.device.type != "cpu"

    flex_attention_output = compile_friendly_flex_attention(
        query,
        key,
        value,
        score_mod=score_mod,
        block_mask=block_mask,
        enable_gqa=enable_gqa,
        scale=scaling,
        kernel_options=kernel_options,
        return_lse=return_lse,
        training=module.training,
    )
    if return_lse:
        attention_output, lse = flex_attention_output
        lse = lse.to(value.dtype)
    else:
        attention_output = flex_attention_output
        lse = None

    attention_output = attention_output.transpose(1, 2).contiguous()
    return attention_output, lse
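

if __name__ == "__main__":
    # Minimal smoke-test sketch, assuming a torch build where flex attention is available and
    # usable on the current device; the shapes and mask values below are made up for illustration.
    if is_torch_flex_attn_available():
        dummy_module = torch.nn.Module()  # flex_attention_forward only reads `.training` from it
        dummy_module.eval()

        batch, num_heads, num_kv_heads, seq_len, head_dim = 2, 8, 2, 7, 16
        # Packed sequences: per-token document ids, 0 marks padding
        attention_mask_2d = torch.tensor([[1, 1, 1, 2, 2, 2, 0], [1, 1, 2, 2, 2, 3, 3]])
        block_mask = make_flex_block_causal_mask(attention_mask_2d)

        query = torch.randn(batch, num_heads, seq_len, head_dim)
        key = torch.randn(batch, num_kv_heads, seq_len, head_dim)
        value = torch.randn(batch, num_kv_heads, seq_len, head_dim)

        attn_output, lse = flex_attention_forward(
            dummy_module, query, key, value, attention_mask=block_mask, scaling=head_dim**-0.5
        )
        # Expected: (batch, seq_len, num_heads, head_dim) after the final transpose; lse is None on CPU
        print("attention output:", tuple(attn_output.shape), "lse:", None if lse is None else tuple(lse.shape))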