
"""PyTorch VipLlava model."""

from dataclasses import dataclass
from typing import Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_outputs import BaseModelOutputWithPast, ModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, can_return_tuple
from ..auto import AutoModel
from .configuration_vipllava import VipLlavaConfig


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VipLlava outputs, with hidden states and attentions.
    """
)
class VipLlavaModelOutputWithPast(BaseModelOutputWithPast):
    r"""
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    image_hidden_states: Optional[torch.FloatTensor] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for VipLlava causal language model (or autoregressive) outputs.
    """
)
class VipLlavaCausalLMOutputWithPast(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
        Language modeling loss (for next-token prediction).
    logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
        Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
        It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).

        Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
        `past_key_values` input) to speed up sequential decoding.
    image_hidden_states (`torch.FloatTensor`, *optional*):
        A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
        image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: Optional[torch.FloatTensor] = None
    past_key_values: Optional[Cache] = None
    hidden_states: Optional[tuple[torch.FloatTensor]] = None
    attentions: Optional[tuple[torch.FloatTensor]] = None
    image_hidden_states: Optional[torch.FloatTensor] = None


class VipLlavaMultiModalProjector(nn.Module):
    def __init__(self, config: VipLlavaConfig):
        super().__init__()
        num_feature_layers = 1 if isinstance(config.vision_feature_layers, int) else len(config.vision_feature_layers)
        self.projector_layernorm = nn.LayerNorm(
            num_feature_layers * config.vision_config.hidden_size, eps=config.projector_layernorm_eps
        )

        self.linear_1 = nn.Linear(
            num_feature_layers * config.vision_config.hidden_size,
            config.text_config.hidden_size,
            bias=True,
        )
        self.act = ACT2FN[config.projector_hidden_act]
        self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)

    def forward(self, hidden_states):
        hidden_states = self.projector_layernorm(hidden_states)
        hidden_states = self.linear_1(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.linear_2(hidden_states)
        return hidden_states


@auto_docstring
class VipLlavaPreTrainedModel(PreTrainedModel):
    config: VipLlavaConfig
    base_model_prefix = ""
    supports_gradient_checkpointing = True
    _skip_keys_device_placement = "past_key_values"
    _supports_flash_attn = True
    _supports_sdpa = True
    _can_compile_fullgraph = True
    _supports_flex_attn = True
    _supports_attention_backend = True


@auto_docstring(
    custom_intro="""
    The VipLlava model which consists of a vision backbone and a language model, without a language modeling head.
    """
)
class VipLlavaModel(VipLlavaPreTrainedModel):
    _checkpoint_conversion_mapping = {"language_model.model": "language_model"}

    def __init__(self, config: VipLlavaConfig):
        super().__init__(config)
        self.vision_tower = AutoModel.from_config(config.vision_config)
        self.multi_modal_projector = VipLlavaMultiModalProjector(config)
        self.language_model = AutoModel.from_config(config.text_config)
        self.post_init()

    def get_input_embeddings(self):
        return self.language_model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.language_model.set_input_embeddings(value)

    def set_decoder(self, decoder):
        self.language_model = decoder

    def get_decoder(self):
        return self.language_model

    def get_image_features(
        self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, list[int]]] = None
    ):
        """
        Obtains image last hidden states from the vision tower and apply multimodal projection.

        Args:
            pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
                The tensors corresponding to the input images.
            vision_feature_layers (`Union[int, list[int]]`):
                The vision feature layer, or the list of indexes of the layers to select
                the vision feature.
        Returns:
            image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
        """
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )

        image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)

        # If multiple feature layers are provided (which is usually the case), the
        # selected hidden states are concatenated after the CLS token is removed.
        if isinstance(vision_feature_layers, int):
            image_features = image_outputs.hidden_states[vision_feature_layers][:, 1:]
        else:
            image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]
            image_features = torch.cat(image_features, dim=-1)
        image_features = self.multi_modal_projector(image_features)
        return image_features

    def get_placeholder_mask(
        self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
    ):
        """
        Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
        equal to the length of multimodal features. If the lengths are different, an error is raised.
        """
        if input_ids is None:
            special_image_mask = inputs_embeds == self.get_input_embeddings()(
                torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
            )
            special_image_mask = special_image_mask.all(-1)
        else:
            special_image_mask = input_ids == self.config.image_token_id

        n_image_tokens = special_image_mask.sum()
        special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
        n_image_features = image_features.shape[0] * image_features.shape[1]
        if inputs_embeds[special_image_mask].numel() != image_features.numel():
            raise ValueError(
                f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
            )
        return special_image_mask
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layers: Optional[Union[int, list[int]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **lm_kwargs,
    ) -> Union[tuple, VipLlavaModelOutputWithPast]:
        r"""
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )

        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.get_input_embeddings()(input_ids)

        if pixel_values is not None:
            image_features = self.get_image_features(
                pixel_values=pixel_values, vision_feature_layers=vision_feature_layers
            )
            image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
            special_image_mask = self.get_placeholder_mask(
                input_ids, inputs_embeds=inputs_embeds, image_features=image_features
            )
            # Replace each <image> placeholder embedding in-place with one projected
            # image feature; counts were validated in `get_placeholder_mask` above.
            inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)

        outputs = self.language_model(
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )

        output = VipLlavaModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=image_features if pixel_values is not None else None,
        )
        return output if return_dict else output.to_tuple()


@auto_docstring(
    custom_intro="""
    The VIPLLAVA model which consists of a vision backbone and a language model.
    """
)
class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel, GenerationMixin):
    _checkpoint_conversion_mapping = {
        "^language_model.model": "model.language_model",
        "^vision_tower": "model.vision_tower",
        "^multi_modal_projector": "model.multi_modal_projector",
        "^language_model.lm_head": "lm_head",
    }
    _tied_weights_keys = ["lm_head.weight"]

    def __init__(self, config: VipLlavaConfig):
        super().__init__(config)
        self.model = VipLlavaModel(config)
        self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
        self.post_init()

    def get_input_embeddings(self):
        return self.model.get_input_embeddings()

    def set_input_embeddings(self, value):
        self.model.set_input_embeddings(value)

    def get_output_embeddings(self) -> nn.Module:
        return self.lm_head

    def set_decoder(self, decoder):
        self.model.set_decoder(decoder)

    def get_decoder(self):
        return self.model.get_decoder()

    def get_image_features(
        self, pixel_values: torch.FloatTensor, vision_feature_layers: Optional[Union[int, list[int]]] = None
    ):
        return self.model.get_image_features(pixel_values=pixel_values, vision_feature_layers=vision_feature_layers)

    # Make inner modules reachable through the wrapper class for backward compatibility
    @property
    def language_model(self):
        return self.model.language_model

    @property
    def vision_tower(self):
        return self.model.vision_tower

    @property
    def multi_modal_projector(self):
        return self.model.multi_modal_projector

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        vision_feature_layers: Optional[Union[int, list[int]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **lm_kwargs,
    ) -> Union[tuple, VipLlavaCausalLMOutputWithPast]:
        r"""
        vision_feature_layers (`Union[int, list[int]]`, *optional*):
            The vision feature layer, or the list of indexes of the layers to select
            the vision feature.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
            config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
            (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

        Example:

        ```python
        >>> import torch
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, VipLlavaForConditionalGeneration

        >>> model = VipLlavaForConditionalGeneration.from_pretrained("llava-hf/vip-llava-7b-hf", device_map="auto", dtype=torch.float16)
        >>> processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")

        >>> prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n{}###Assistant:"
        >>> question = "Can you please describe this image?"
        >>> prompt = prompt.format(question)
        >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)

        >>> # Generate
        >>> generate_ids = model.generate(**inputs, max_new_tokens=20)
        >>> processor.decode(generate_ids[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
        The image features a brown and white cat sitting on a green surface, with a red ball in its
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        vision_feature_layers = (
            vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
        )

        outputs = self.model(
            input_ids=input_ids,
            pixel_values=pixel_values,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            vision_feature_layers=vision_feature_layers,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=True,
            cache_position=cache_position,
            **lm_kwargs,
        )

        hidden_states = outputs[0]
        # Only compute the necessary logits; do not upcast them to float if the loss is not computed
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size)

        return VipLlavaCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            image_hidden_states=outputs.image_hidden_states,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        inputs_embeds=None,
        pixel_values=None,
        attention_mask=None,
        cache_position=None,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- in specific circumstances we don't want to forward image inputs to the model

        model_inputs = super().prepare_inputs_for_generation(
            input_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )

        if cache_position[0] == 0:
            # In a cached decoding stage, pixel values should be None because the input
            # ids no longer contain the special image token; otherwise they are needed.
            model_inputs["pixel_values"] = pixel_values

        return model_inputs


__all__ = ["VipLlavaModel", "VipLlavaForConditionalGeneration", "VipLlavaPreTrainedModel"]