
    Phq                        d dl Z d dlmZ d dlmZmZmZ d dlZd dlm	c m
Z d dlm	Z	 ddlmZ ddlmZ ddlmZ dd	lmZ dd
lmZmZ ddlmZmZ ddlmZ ddlmZmZmZm Z m!Z! ddl"m#Z# ddl$m%Z% ddl&m'Z'm(Z(m)Z) ee G d de                    Z* ed       G d de	jV                               Z, G d de	jV                        Z- G d de	jV                        Z. G d de	jV                        Z/	 d<de	jV                  dej`                  dej`                  d ej`                  d!eej`                     d"e1d#e1fd$Z2 G d% d&e	jV                        Z3 G d' d(e      Z4 G d) d*e	jV                        Z5 G d+ d,e	jV                        Z6e G d- d.e             Z7 ed/0       G d1 d2e7             Z8 ed30       G d4 d5e7             Z9d6ej`                  d7ej`                  fd8Z:e G d9 d:e7             Z;g d;Z<y)=    N)	dataclass)AnyCallableOptional)nn   )ACT2FN)use_kernel_forward_from_hub)create_causal_mask)GradientCheckpointingLayer)BaseModelOutputBaseModelOutputWithPooling)ALL_ATTENTION_FUNCTIONSPreTrainedModel)Unpack)ModelOutputTransformersKwargsauto_docstringcan_return_tuplefilter_out_non_signature_kwargs)deprecate_kwarg)check_model_inputs   )Aimv2ConfigAimv2TextConfigAimv2VisionConfigc                      e Zd ZU dZdZeej                     ed<   dZ	eej                     ed<   dZ
eej                     ed<   dZeej                     ed<   dZeej                     ed<   dZeed<   dZeed	<   d
ee   fdZy)Aimv2Outputa  
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`Aimv2TextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`Aimv2VisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Aimv2TextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`Aimv2VisionModel`].
    Nlosslogits_per_imagelogits_per_texttext_embedsimage_embedstext_model_outputvision_model_outputreturnc                 H     t         fd j                         D              S )Nc              3   d   K   | ]'  }|d vr|   nt        |      j                          ) yw))r$   r%   N)getattrto_tuple).0kselfs     b/var/www/html/saasai/venv/lib/python3.12/site-packages/transformers/models/aimv2/modeling_aimv2.py	<genexpr>z'Aimv2Output.to_tuple.<locals>.<genexpr>K   s=      
   LLDGRYZ^`aRbRkRkRmm s   -0)tuplekeysr-   s   `r.   r*   zAimv2Output.to_tupleJ   s#     
YY[
 
 	
    )__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r    r!   r"   r#   r$   r   r%   r0   r   r*    r3   r.   r   r   ,   s    & )-D(5$$
%,48hu001837OXe//07/3K%++,304L(5,,-448186:3:
%* 
r3   r   RMSNormc                   ,     e Zd Zd fd	Zd Zd Z xZS )Aimv2RMSNormc                     t         |           t        j                  t	        j
                  |            | _        || _        y)z;
        Aimv2RMSNorm is equivalent to T5LayerNorm
        N)super__init__r   	Parameterr8   onesweightvariance_epsilon)r-   hidden_sizeeps	__class__s      r.   rA   zAimv2RMSNorm.__init__S   s1     	ll5::k#:; #r3   c                 "   |j                   }|j                  t        j                        }|j	                  d      j                  dd      }|t        j                  || j                  z         z  }| j                  |j                  |      z  S )N   T)keepdim)	dtypetor8   float32powmeanrsqrtrE   rD   )r-   hidden_statesinput_dtypevariances       r.   forwardzAimv2RMSNorm.forward[   sy    #))%((7 $$Q',,R,>%Ht?T?T4T(UU{{]--k:::r3   c                 ^    t        | j                  j                         d| j                   S )Nz, eps=)r0   rD   shaperE   r2   s    r.   
extra_reprzAimv2RMSNorm.extra_reprb   s*    ))*+6$2G2G1HIIr3   )gư>)r4   r5   r6   rA   rV   rY   __classcell__rH   s   @r.   r>   r>   Q   s    $;Jr3   r>   c                   $     e Zd Z fdZd Z xZS )Aimv2MLPc                    t         |           || _        |j                  | _        |j                  | _        t        j                  | j                  | j                  |j                        | _        t        j                  | j                  | j                  |j                        | _	        t        j                  | j                  | j                  |j                        | _
        t        |j                     | _        y )Nbias)r@   rA   configrF   intermediate_sizer   Linearmlp_bias	gate_projup_proj	down_projr	   
hidden_actact_fnr-   ra   rH   s     r.   rA   zAimv2MLP.__init__g   s    !--!'!9!94#3#3T5K5KRXRaRabyy!1!143I3IPVP_P_`4#9#94;K;KRXRaRabV../r3   c                     | j                  | j                  | j                  |            | j                  |      z        }|S N)rg   ri   re   rf   )r-   xrg   s      r.   rV   zAimv2MLP.forwardq   s6    NN4;;t~~a/@#ADLLQRO#ST	r3   )r4   r5   r6   rA   rV   rZ   r[   s   @r.   r]   r]   f   s    0r3   r]   c                        e Zd Zdef fdZedddej                  fdej                  fd       Z	dej                  dej                  fd	Z
 xZS )
Aimv2VisionEmbeddingsra   c                 B   t         |           || _        |j                  | _        t	        j
                  |j                  |j                  |j                  |j                        | _        t        |j                  |j                        | _        |j                  |j                  z  dz  }| j                  j                  s%t	        j                  ||j                        | _        | j!                  dt#        j$                  |      j'                  d      d       y )N)kernel_sizestriderJ   position_idsr   rK   F
persistent)r@   rA   ra   
patch_sizer   Conv2dnum_channelsrF   patch_embedr>   rms_norm_epsrms_norm
image_size	is_native	Embeddingposition_embeddingregister_bufferr8   arangeexpand)r-   ra   num_patchesrH   s      r.   rA   zAimv2VisionEmbeddings.__init__w   s     ++99!3!3ARAR[a[l[l
 %V%7%79L9LM((F,=,==!C{{$$&(ll;@R@R&SD#^U\\+-F-M-Mg-Vchir3      g     @cpur&   c                 :   t        j                  t        |      ||      }t        j                  t        |       ||      }t        j                  ||d      \  }}|dz  }t        j                  |||      |z  }	d||	z  z  }	|j	                         d   |	d d d f   z  }
|j	                         d   |	d d d f   z  }t        j
                  |
j                         |
j                         |j                         |j                         gd      d d d d d f   S )	NrM   devicexy)indexing   g      ?).Nr   dim)r8   r   intmeshgridflattenconcatsincos)heightwidth	embed_dimtemperaturer   rM   grid_wgrid_hpos_dimomegaout_hout_ws               r.   "build_2d_sincos_position_embeddingz8Aimv2VisionEmbeddings.build_2d_sincos_position_embedding   s     c%jfEc&kvFFq.WE&AGK{E)* +eD!Gn< +eD!Gn<||UYY[%))+uyy{EIIKPVWXY]_`bcYcddr3   pixel_valuesc                    |j                         \  }}}}| j                  |      j                  d      j                  dd      }| j	                  |      }| j
                  j                  rY| j                  || j                  z  || j                  z  | j
                  j                  |j                  |j                        }n| j                  | j                        }||z   }|S )NrJ   r   )r   r   rM   )sizerz   r   	transposer|   ra   r~   r   rw   rF   r   rM   r   rs   )r-   r   _r   r   rS   	pos_embeds          r.   rV   zAimv2VisionEmbeddings.forward   s    *//11fe((6>>qAKKAqQm4;;  ??$//)(++11$++#)) @ I //0A0ABI%	1r3   )r4   r5   r6   r   rA   staticmethodr8   rO   Tensorr   rV   rZ   r[   s   @r.   ro   ro   v   s]    j0 j !$'%u}}e	e e ELL U\\ r3   ro   c            	            e Zd Zdef fdZ	 	 	 ddeej                     deej                     deej                     dej                  fdZ
 xZS )	Aimv2TextEmbeddingsra   c                 N   t         |           |j                  }t        j                  |j
                  |      | _        t        j                  |j                  |      | _        | j                  dt        j                  |j                        j                  d      d       y )Nrs   rt   Fru   )r@   rA   rF   r   r   
vocab_sizetoken_embeddingmax_position_embeddingsr   r   r8   r   r   )r-   ra   r   rH   s      r.   rA   zAimv2TextEmbeddings.__init__   s    &&	!||F,=,=yI"$,,v/M/My"Y 	ELL)G)GHOOPWXej 	 	
r3   	input_idsrs   inputs_embedsr&   c                 8   ||j                   d   n|j                   d   }| j                  j                  j                   d   }||kD  rt        d| d|       || j                  d d d |f   }|| j                  |      }| j                  |      }||z   }|S )NrK   r   zRSequence length must be less than max_position_embeddings (got `sequence length`: z and max_position_embeddings: )rX   r   rD   
ValueErrorrs   r   )r-   r   rs   r   
seq_lengthmax_position_embeddingposition_embeddings
embeddingss           r.   rV   zAimv2TextEmbeddings.forward   s     -6,AY__R(}GZGZ[]G^
!%!8!8!?!?!E!Ea!H..d,<=S<TV 
 ,,Q^<L  00;M"55lC"%88
r3   NNN)r4   r5   r6   r   rA   r   r8   
LongTensorr9   r   rV   rZ   r[   s   @r.   r   r      sj    

 

 153759	E,,- u//0   1 12	
 
r3   r   modulequerykeyvalueattention_maskscalingdropoutc                    t        j                  ||j                  dd            |z  }|||z   }t        j                  j                  |dt         j                        j                  |j                        }t        j                  j                  ||| j                        }t        j                  ||      }	|	j                  dd      j                         }	|	|fS )NrK   r   )r   rM   )ptrainingr   rJ   )r8   matmulr   r   
functionalsoftmaxrO   rN   rM   r   r   
contiguous)
r   r   r   r   r   r   r   kwargsattn_weightsattn_outputs
             r.   eager_attention_forwardr      s     <<s}}R'<=GL!#n4==((2U]](SVVW\WbWbcL==((6??([L,,|U3K''1-88:K$$r3   c            
            e Zd ZdZ fdZ	 ddej                  deej                     deej                  eej                     f   fdZ	 xZ
S )Aimv2Attentionz=Multi-headed attention from 'Attention Is All You Need' paperc                 x   t         |           || _        |j                  | _        |j
                  | _        | j                  | j                  z  | _        | j                  | j                  z  | j                  k7  r&t        d| j                   d| j                   d      | j                  dz  | _	        |j                  | _        d| _        t        j                  | j                  | j                  |j                        | _        t        j                  | j                  | j                  |j                        | _        t        j                  | j                  | j                  |j                        | _        t        j                  | j                  | j                  |j                        | _        y )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      Fr_   )r@   rA   ra   rF   r   num_attention_heads	num_headshead_dimr   scaleattention_dropoutr   	is_causalr   rc   qkv_biask_projv_projq_projout_projrj   s     r.   rA   zAimv2Attention.__init__   s2   ++33$..8==4>>)T^^;MdnnM] ^NN#2'  ]]D(
//iiV__UiiV__UiiV__U		$..$..vWr3   rS   r   r&   c           
      :   |j                   \  }}}| j                  |      }| j                  |      }| j                  |      }	|j	                  ||| j
                  | j                        j                  dd      }|j	                  ||| j
                  | j                        j                  dd      }|	j	                  ||| j
                  | j                        j                  dd      }	t        }
| j                  j                  dk7  rt        | j                  j                     }
 |
| |||	|| j                  | j                  | j                  sdn| j                        \  }}|j!                  |||      j#                         }| j%                  |      }||fS )z#Input shape: Batch x Time x Channelr   rJ   eager        )r   r   r   )rX   r   r   r   viewr   r   r   r   ra   _attn_implementationr   r   r   r   r   reshaper   r   )r-   rS   r   r   
batch_sizer   r   queriesr1   valuesattention_interfacer   r   s                r.   rV   zAimv2Attention.forward   sa    -:,?,?)
J	++m,{{=)]+,,z:t~~t}}U__`acdeyyZOYYZ[]^_ZT^^T]]S]]^_abc(?;;++w6"9$++:Z:Z"[$7nnJJ#}}C$,,	%
!\ "))*j)LWWYmmK0L((r3   rl   )r4   r5   r6   r7   rA   r8   r   r   r0   rV   rZ   r[   s   @r.   r   r      sV    GX, 26$)||$) !.$)
 
u||Xell33	4$)r3   r   c            	            e Zd Zdef fdZ	 ddej                  deej                     dee	   dej                  fdZ
 xZS )	Aimv2EncoderLayerra   c                     t         |           t        |      | _        t	        |      | _        t        |j                  |j                        | _	        t        |j                  |j                        | _
        y rl   )r@   rA   r   	attentionr]   ffnr>   rF   r{   	rms_norm1	rms_norm2rj   s     r.   rA   zAimv2EncoderLayer.__init__'  sZ    '/F#%f&8&8&:M:MN%f&8&8&:M:MNr3   rS   r   r   r&   c                     | j                  |      } | j                  d||d|\  }}||z   }| j                  |      }| j                  |      }||z   }|S )N)rS   r   r;   )r   r   r   r   )r-   rS   r   r   norm_hidden_statesr   r   
mlp_outputs           r.   rV   zAimv2EncoderLayer.forward.  sl     "^^M:'r6HYgrkqrQ%3!^^M:XX01
%
2r3   rl   )r4   r5   r6   r   rA   r8   r   r   r   r   rV   rZ   r[   s   @r.   r   r   &  sY    O0 O 26|| !. +,	
 
r3   r   c                   j     e Zd ZdZdef fdZe	 ddeej                     de
e   defd       Z xZS )	Aimv2Encoderz
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`Aimv2EncoderLayer`].

    Args:
        config: Aimv2Config
    ra   c                     t         |           || _        t        j                  t        |j                        D cg c]  }t        |       c}      | _        d| _	        y c c}w )NF)
r@   rA   ra   r   
ModuleListrangenum_hidden_layersr   layersgradient_checkpointing)r-   ra   r   rH   s      r.   rA   zAimv2Encoder.__init__H  sU    mmfNfNfHg$hHg1%6v%>Hg$hi&+# %is   A#r   r   r&   c                 T    |}| j                   D ]  } |||fi |} t        |      S )N)last_hidden_state)r   r   )r-   r   r   r   rS   encoder_layers         r.   rV   zAimv2Encoder.forwardO  s>     &![[M) M ) ??r3   rl   )r4   r5   r6   r7   r   rA   r   r   r8   r   r   r   r   rV   rZ   r[   s   @r.   r   r   ?  s_    ,{ ,  26@ !.@ +,	@
 
@ @r3   r   c                   \     e Zd Zdef fdZdej                  dej                  fdZ xZS )Aimv2AttentionPoolingHeadra   c                 &   t         |           |j                  | _        |j                  | _        t        j                  | j                  | j                  |j                        | _        t        j                  | j                  | j                  |j                        | _	        t        j                  t        j                  dd| j                              | _        t        j                  | j                  | j                  d      | _        y )Nr_   r   T)r@   rA   rF   r   r   r   rc   r   r   r   rB   r8   zeros	cls_tokenoutput_projrj   s     r.   rA   z"Aimv2AttentionPoolingHead.__init__b  s    !--33ii 0 0$2B2BYii 0 0$2B2BYekk!Q8H8H&IJ99T%5%5t7G7GdSr3   rS   r&   c                    |j                   \  }}}| j                  j                  |dd      }| j                  |      j	                  ||| j
                  || j
                  z        }| j                  |      j	                  ||| j
                  || j
                  z        }|j	                  |d| j
                  || j
                  z        }|j                  dddd      }|j                  dddd      }|j                  dddd      }t        j                  |||      }	|	j                  dd      j	                  |d|      }	|	j                  d      }	| j                  |	      }
|
S )NrK   r   r   rJ   r   r   )rX   r   r   r   r   r   r   permuteFscaled_dot_product_attentionr   rQ   r   )r-   rS   r   seq_len
hidden_dimr   r   r   r   r   outputs              r.   rV   z!Aimv2AttentionPoolingHead.forwardm  sH   *7*=*='
GZNN))*b"=	kk-(00WdnnV`dhdrdrVrsM*22:wXbfjftftXtu!!*at~~A]^kk!Q1%aAq)aAq)44UCG!++Aq199*aT!&&1&-!!+.r3   )	r4   r5   r6   r   rA   r8   r   rV   rZ   r[   s   @r.   r   r   a  s-    	T0 	TU\\ ell r3   r   c                   J     e Zd ZU dZeed<   dZdZg dZdZ	dZ
dZ fdZ xZS )Aimv2PreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models. The model is only intended for inference and doesn't support finetuning.
    ra   aimv2T)r   r   ro   r   c                    t         |   |       t        |d      r^t        |j                  t
        j                        r9|j                  j                  j                  t        j                  d             y y t        |t              r<|j                  j                  j                  d| j                  j                         y y )Nlogit_scaleg$I$I,@r   )rQ   std)r@   _init_weightshasattr
isinstancer	  r   rB   datafill_mathlogr   r   normal_ra   initializer_range)r-   r   rH   s     r.   r  z"Aimv2PreTrainedModel._init_weights  s    f%6=)&,,bll;""''--dhhx.@A < 9:!!))s8U8U)V ;r3   )r4   r5   r6   r7   r   r:   base_model_prefixsupports_gradient_checkpointing_no_split_modules_supports_sdpa_supports_flash_attn_supports_flex_attnr  rZ   r[   s   @r.   r  r    sC    
 &*# NW Wr3   r  zL
    The Vision model from AIMv2 without any head or projection on top.
    )custom_introc            
            e Zd ZU eed<   dZeedZdef fdZ	de
j                  fdZ edd	      ee	 ddeej$                     d
ee   defd                     Z xZS )Aimv2VisionModelra   r   rS   
attentionsc                 6   t         |   |       || _        t        |      | _        t        |      | _        t        |j                  |j                        | _
        |j                  | _        | j                  rt        |      | _        | j                          y rl   )r@   rA   ra   ro   r   r   encoderr>   rF   r{   r|   use_headr   head	post_initrj   s     r.   rA   zAimv2VisionModel.__init__  sq     /7#F+$V%7%79L9LM==1&9DIr3   r&   c                 .    | j                   j                  S rl   )r   rz   r2   s    r.   get_input_embeddingsz%Aimv2VisionModel.get_input_embeddings  s    ***r3   r   zv4.58.0)versionr   c                     | j                  |      } | j                  dd|i|}|j                  }| j                  |      }| j                  r| j                  |      nd}t        ||      S )a  
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Siglip2VisionModel

        >>> model = Aimv2VisionModel.from_pretrained("apple/aimv2-large-patch14-native")
        >>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-native")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```r   Nr   pooler_outputr;   )r   r   r   r|   r!  r"  r   )r-   r   r   r   rS   encoder_outputsr   r)  s           r.   rV   zAimv2VisionModel.forward  sz    : 5+74<< ,
',
,

 ,== MM*;<8<		"344)/'
 	
r3   rl   )r4   r5   r6   r   r:   main_input_namer   r   _can_record_outputsrA   r   Moduler%  r   r   r   r   r8   r   r   r   r   rV   rZ   r[   s   @r.   r  r    s     $O*$
0 +bii + %y9 26)
 !.)
 +,	)

 
$)
   :)
r3   r  zJ
    The text model from AIMv2 without any head or projection on top.
    c            	            e Zd ZdZeedZdef fdZde	j                  fdZd Zee	 ddeej"                     d	ee   defd
              Z xZS )Aimv2TextModelr   r  ra   c                     t         |   |       || _        t        |      | _        t        |      | _        t        |j                  |j                        | _
        |j                  | _        | j                          y rl   )r@   rA   ra   r   r   r   r   r>   rF   r{   r|   eos_token_idr#  rj   s     r.   rA   zAimv2TextModel.__init__  sa     -f5#F+$V%7%79L9LM"//r3   r&   c                 .    | j                   j                  S rl   r   r   r2   s    r.   r%  z#Aimv2TextModel.get_input_embeddings  s    ...r3   c                 &    || j                   _        y rl   r3  )r-   r   s     r.   set_input_embeddingsz#Aimv2TextModel.set_input_embeddings  s    */'r3   r   r   c                    | j                  |      }|j                  \  }}}t        j                  |t        j                  |j
                        }|j                  d      j                  |d      }	|t        | j                  ||	||d       } | j                  d	||d|}
|
j                  }| j                  |      }|t        j                  |j                  d   |j
                        |j                  t        j                  |j
                        | j                  k(  j                         j!                  d      f   }t#        ||      S )
Nr   r   rK   )ra   input_embedsrs   r   cache_positionpast_key_values)r   r   )r   r   r(  r;   )r   rX   r8   r   longr   	unsqueezer   r   ra   r   r   r|   rN   r   r1  argmaxr   )r-   r   r   r   rS   r   r  r   r8  rs   r*  r   pooled_outputs                r.   rV   zAimv2TextModel.forward  sN    	2!.!4!4
GQgUZZH\H\]%//299*bI%/{{*)-- $N '$,, 
')
 
 ,== MM*;< *LL*003<M<T<TU\\		2C2J2J\KtO`O``eegnnsunvx

 */'
 	
r3   rl   )r4   r5   r6   r+  r   r   r,  r   rA   r   r-  r%  r5  r   r   r   r8   r   r   r   r   rV   rZ   r[   s   @r.   r/  r/    s     "O +$
	 	/bii /0  26'
 !.'
 +,	'

 
$'
  '
r3   r/  tensorr&   c                     t        j                  | d      }t        j                  |dd      }t        j                  |d      }|S )z
    This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make
    model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566
    rJ   rK   T)r   rL   g      ?)r8   rP   sum)r>  square_tensor
sum_tensornormed_tensors       r.   _get_vector_normrD  7  s<    
 IIfa(M=b$?JIIj#.Mr3   c                       e Zd ZU eed<   g dZdZdef fdZ e       e		 	 dde
j                  dee
j                     dee
j                     de
j                  fd	              Z e       e		 dd
e
j                  dede
j                  fd              Ze	e	 	 	 ddee
j$                     d
ee
j                     dee
j                     dee   def
d              Z xZS )
Aimv2Modelra   )r   r   ro   Tc                    t         |   |       |j                  | _        |j                  j                  | _        |j                  j                  | _        t        j                  |j                        | _
        t        j                  |j                        | _        t        j                  | j
                  | j                  d      | _        t        j                  | j                  | j                  d      | _        t        j"                  t%        j&                  | j(                  j*                              | _        t/        j0                  |j2                        | _        | j7                          y )NFr_   )r@   rA   projection_dimvision_configrF   vision_embed_dimtext_configtext_embed_dimr  _from_configvision_modelr/  
text_modelr   rc   visual_projectiontext_projectionrB   r8   r>  ra   logit_scale_init_valuer	  r  r  max_logit_scalemax_log_logit_scaler#  rj   s     r.   rA   zAimv2Model.__init__H  s     $33 & 4 4 @ @$00<<,99&:N:NO(55f6H6HI!#4+@+@$BUBU\a!b!yy)<)<d>Q>QX]^<<T[[5W5W(XY#'88F,B,B#C r3   r   r   rs   r&   c                 h    | j                  |||      }|j                  }| j                  |      }|S )a  
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`Aimv2TextModel`].

        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, Aimv2Model

        >>> model = Aimv2Model.from_pretrained("openai/aimv2-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/aimv2-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     text_features = model.get_text_features(**inputs)
        ```)r   r   rs   )rO  r)  rQ  )r-   r   r   rs   text_outputsr=  text_featuress          r.   get_text_featureszAimv2Model.get_text_featuresZ  sD    6 48??)% 4C 4

 %22,,];r3   r   interpolate_pos_encodingc                 f    | j                  ||      }|j                  }| j                  |      }|S )an  
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`Aimv2VisionModel`].

        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, Aimv2Model
        >>> from transformers.image_utils import load_image

        >>> model = Aimv2Model.from_pretrained("openai/aimv2-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/aimv2-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     image_features = model.get_image_features(**inputs)
        ```)r   rY  )rN  r)  rP  )r-   r   rY  vision_outputsr=  image_featuress         r.   get_image_featureszAimv2Model.get_image_features  sC    < 6:5F5F%%= 6G 6
 '44//>r3   r   c                     | j                   dd|i|} | j                  d||d|}|j                  }| j                  |      }|j                  }| j	                  |      }|t        |      z  }|t        |      z  }| j                  j                  d| j                        j                         j                  |j                        }	|	|z  |j                         z  }
|
j                         }t        ||
||||      S )a  
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Aimv2Model

        >>> model = Aimv2Model.from_pretrained("apple/aimv2-large-patch14-224-lit")
        >>> processor = AutoProcessor.from_pretrained("apple/aimv2-large-patch14-224-lit")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```r   )r   r   r   )r    r!   r"   r#   r$   r%   r;   )rN  rO  r)  rP  rQ  rD  r	  clamprT  exprN   r   tr   )r-   r   r   r   r   r[  rV  r#   r"   r	  r!   r    s               r.   rV   zAimv2Model.forward  s&   > 6GT5F5F 6
%6
6

 4C4?? 4
)4
 4
 &33--l;"00**;7 $&6|&DD!$4[$AA&&,,S$2J2JKOOQTTU`UgUgh&48HH*,,.-+#%* .
 	
r3   )NN)Fr   )r4   r5   r6   r   r:   r  r  rA   r   r   r8   r   r   r9   rX  boolr]  r   r   r   r   r   rV   rZ   r[   s   @r.   rF  rF  B  sT   ]{ $ %& 26/3	!<<! !.! u||,	!
 
		!  '!F %& */#''# #'# 
			#  '#J  154815	=
E,,-=
 u001=
 !.	=

 +,=
 
=
  =
r3   rF  )r  rF  r  r/  )r   )=r  dataclassesr   typingr   r   r   r8   torch.nn.functionalr   r   r   activationsr	   integrationsr
   masking_utilsr   modeling_layersr   modeling_outputsr   r   modeling_utilsr   r   processing_utilsr   utilsr   r   r   r   r   utils.deprecationr   utils.genericr   configuration_aimv2r   r   r   r   r-  r>   r]   ro   r   r   floatr   r   r   r   r   r  r  r/  rD  rF  __all__r;   r3   r.   <module>rs     s  .  ! * *     ! 7 / 9 K F & w w 0 / P P  
+  
   
F Y'J299 J (J(ryy  1BII 1h%")) %^ %II%<<% 
% <<	%
 U\\*% % %.:)RYY :)z2 2@299 @D		 D W? W W8 
E
+ E

E
P 
B
) B

B
JU\\ ell  b
% b
 b
J Wr3   