
"""PyTorch CLIP model."""

from dataclasses import dataclass
from typing import Any, Callable, Optional, Union

import torch
from torch import nn

from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import (
    ModelOutput,
    auto_docstring,
    can_return_tuple,
    filter_out_non_signature_kwargs,
    logging,
    torch_int,
)
from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig


logger = logging.get_logger(__name__)


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    caption_loss = contrastive_loss(similarity)
    image_loss = contrastive_loss(similarity.t())
    return (caption_loss + image_loss) / 2.0


def _get_vector_norm(tensor: torch.Tensor) -> torch.Tensor:
    """
    This method is equivalent to tensor.norm(p=2, dim=-1, keepdim=True) and used to make
    model `executorch` exportable. See issue https://github.com/pytorch/executorch/issues/3566
    """
    square_tensor = torch.pow(tensor, 2)
    sum_tensor = torch.sum(square_tensor, dim=-1, keepdim=True)
    normed_tensor = torch.pow(sum_tensor, 0.5)
    return normed_tensor


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
    """
)
class CLIPVisionModelOutput(ModelOutput):
    r"""
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    """

    image_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    """
)
class CLIPTextModelOutput(ModelOutput):
    r"""
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
        The text embeddings obtained by applying the projection layer to the pooler_output.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[tuple[torch.FloatTensor, ...]] = None


@dataclass
@auto_docstring
class CLIPOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`CLIPTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`CLIPVisionModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_image: Optional[torch.FloatTensor] = None
    logits_per_text: Optional[torch.FloatTensor] = None
    text_embeds: Optional[torch.FloatTensor] = None
    image_embeds: Optional[torch.FloatTensor] = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class CLIPVisionEmbeddings(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        position_embedding = self.position_embedding.weight.unsqueeze(0)
        num_positions = position_embedding.shape[1] - 1

        # always interpolate when tracing so the exported model works for dynamically sized inputs
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        class_pos_embed = position_embedding[:, :1]
        patch_pos_embed = position_embedding[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        batch_size, _, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


class CLIPTextEmbeddings(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        max_position_embedding = self.position_embedding.weight.shape[0]

        if seq_length > max_position_embedding:
            raise ValueError(
                f"Sequence length must be less than max_position_embeddings (got `sequence length`: "
                f"{seq_length} and max_position_embeddings: {max_position_embedding}"
            )

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    output_attentions: bool = True,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    if not output_attentions:
        attn_weights = None

    return attn_output, attn_weights


class CLIPAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: Union[CLIPVisionConfig, CLIPTextConfig]):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, -1, self.head_dim).transpose(1, 2)

        # The CLIP text model uses both `causal_attention_mask` and `attention_mask`; when the
        # flash-attention kernel is used, `is_causal` is inferred from `causal_attention_mask`.
        if self.config._attn_implementation == "flash_attention_2":
            self.is_causal = causal_attention_mask is not None
        else:
            if attention_mask is not None and causal_attention_mask is not None:
                attention_mask = attention_mask + causal_attention_mask
            elif causal_attention_mask is not None:
                attention_mask = causal_attention_mask

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
            output_attentions=output_attentions,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights


class CLIPMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class CLIPEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: Union[CLIPVisionConfig, CLIPTextConfig]):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


@auto_docstring
class CLIPPreTrainedModel(PreTrainedModel):
    config: CLIPConfig
    base_model_prefix = "clip"
    supports_gradient_checkpointing = True
    _supports_sdpa = True
    _supports_flash_attn = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    def _init_weights(self, module):
        """Initialize the weights"""
        factor = self.config.initializer_factor
        if isinstance(module, CLIPTextEmbeddings):
            module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
            module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
        elif isinstance(module, CLIPVisionEmbeddings):
            factor = self.config.initializer_factor
            nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
            nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
            nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
        elif isinstance(module, CLIPAttention):
            factor = self.config.initializer_factor
            in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            out_proj_std = (module.embed_dim**-0.5) * factor
            nn.init.normal_(module.q_proj.weight, std=in_proj_std)
            nn.init.normal_(module.k_proj.weight, std=in_proj_std)
            nn.init.normal_(module.v_proj.weight, std=in_proj_std)
            nn.init.normal_(module.out_proj.weight, std=out_proj_std)
        elif isinstance(module, CLIPMLP):
            factor = self.config.initializer_factor
            in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
            fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
            nn.init.normal_(module.fc1.weight, std=fc_std)
            nn.init.normal_(module.fc2.weight, std=in_proj_std)
        elif isinstance(module, CLIPModel):
            nn.init.normal_(
                module.text_projection.weight,
                std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
            )
            nn.init.normal_(
                module.visual_projection.weight,
                std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPVisionModelWithProjection):
            nn.init.normal_(
                module.visual_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPTextModelWithProjection):
            nn.init.normal_(
                module.text_projection.weight,
                std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, CLIPForImageClassification):
            nn.init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )

        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`CLIPEncoderLayer`].

    Args:
        config: CLIPConfig
    """

    def __init__(self, config: CLIPConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutput:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)

            layer_outputs = encoder_layer(
                hidden_states,
                attention_mask,
                causal_attention_mask,
                output_attentions=output_attentions,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class CLIPTextTransformer(nn.Module):
    def __init__(self, config: CLIPTextConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = CLIPTextEmbeddings(config)
        self.encoder = CLIPEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        # For `pooled_output` computation
        self.eos_token_id = config.eos_token_id

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # CLIP's text model uses a causal attention mask; prepare it here.
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )

        # expand attention_mask: [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
        if attention_mask is not None and self.config._attn_implementation != "flash_attention_2":
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # Legacy configs always used eos_token_id == 2: pool the embedding of the highest token id
            # (the eot token). Cast to torch.int for ONNX compatibility of argmax.
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # Newer configs carry the real `eos_token_id`, so extra new tokens can be used.
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
                .int()
                .argmax(dim=-1),
            ]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The text model from CLIP without any head or projection on top.
    """
)
class CLIPTextModel(CLIPPreTrainedModel):
    config: CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)
        self.text_model = CLIPTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPTextModel

        >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )


class CLIPVisionTransformer(nn.Module):
    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = CLIPEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
    ) -> BaseModelOutputWithPooling:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    The vision model from CLIP without any head or projection on top.
    """
)
class CLIPVisionModel(CLIPPreTrainedModel):
    config: CLIPVisionConfig
    main_input_name = "pixel_values"
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> BaseModelOutputWithPooling:
        r"""
        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPVisionModel

        >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )


@auto_docstring
class CLIPModel(CLIPPreTrainedModel):
    config: CLIPConfig
    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer", "CLIPVisionEmbeddings"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        if not isinstance(config.text_config, CLIPTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type CLIPTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, CLIPVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        text_model = CLIPTextModel._from_config(text_config)
        self.text_model = text_model.text_model

        vision_model = CLIPVisionModel._from_config(vision_config)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPTextModel`].

        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CLIPModel

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
        )
        pooled_output = text_outputs.pooler_output
        text_features = self.text_projection(pooled_output)

        return text_features

    @filter_out_non_signature_kwargs()
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
            applying the projection layer to the pooled output of [`CLIPVisionModel`].

        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, CLIPModel
        >>> from transformers.image_utils import load_image

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )
        pooled_output = vision_outputs.pooler_output
        image_features = self.visual_projection(pooled_output)

        return image_features

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> CLIPOutput:
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, CLIPModel
        >>> from transformers.image_utils import load_image

        >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)

        >>> inputs = processor(
        ...     text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
        ... )

        >>> with torch.inference_mode():
        ...     outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        image_embeds = vision_outputs.pooler_output
        image_embeds = self.visual_projection(image_embeds)

        text_embeds = text_outputs.pooler_output
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / _get_vector_norm(image_embeds)
        text_embeds = text_embeds / _get_vector_norm(text_embeds)

        # cosine similarity as logits
        logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device))
        logits_per_text = logits_per_text * self.logit_scale.exp().to(text_embeds.device)
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = clip_loss(logits_per_text)

        return CLIPOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


@auto_docstring
class CLIPTextModelWithProjection(CLIPPreTrainedModel):
    config: CLIPTextConfig

    _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]

    def __init__(self, config: CLIPTextConfig):
        super().__init__(config)

        text_model = CLIPTextModel._from_config(config)
        self.text_model = text_model.text_model

        self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> CLIPTextModelOutput:
        r"""
        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection

        >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     outputs = model(**inputs)
        >>> text_embeds = outputs.text_embeds
        ```"""
        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )
        pooled_output = text_outputs.pooler_output
        text_embeds = self.text_projection(pooled_output)

        return CLIPTextModelOutput(
            text_embeds=text_embeds,
            last_hidden_state=text_outputs.last_hidden_state,
            hidden_states=text_outputs.hidden_states,
            attentions=text_outputs.attentions,
        )


@auto_docstring
class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
    config: CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPVisionConfig):
        super().__init__(config)

        vision_model = CLIPVisionModel._from_config(config)
        self.vision_model = vision_model.vision_model

        self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
    ) -> CLIPVisionModelOutput:
        r"""
        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection
        >>> from transformers.image_utils import load_image

        >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.inference_mode():
        ...     outputs = model(**inputs)
        >>> image_embeds = outputs.image_embeds
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )
        pooled_output = vision_outputs.pooler_output
        image_embeds = self.visual_projection(pooled_output)

        return CLIPVisionModelOutput(
            image_embeds=image_embeds,
            last_hidden_state=vision_outputs.last_hidden_state,
            hidden_states=vision_outputs.hidden_states,
            attentions=vision_outputs.attentions,
        )


@auto_docstring(
    custom_intro="""
    CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
    the patch tokens) e.g. for ImageNet.
    """
)
class CLIPForImageClassification(CLIPPreTrainedModel):
    main_input_name = "pixel_values"

    def __init__(self, config: CLIPConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        vision_model = CLIPVisionModel._from_config(config.vision_config)
        self.vision_model = vision_model.vision_model

        # Classifier head
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        sequence_output = outputs.last_hidden_state

        # average pool the patch tokens
        sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


__all__ = [
    "CLIPModel",
    "CLIPPreTrainedModel",
    "CLIPTextModel",
    "CLIPTextModelWithProjection",
    "CLIPVisionModel",
    "CLIPVisionModelWithProjection",
    "CLIPForImageClassification",
]
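

# Minimal usage sketch mirroring the zero-shot example from the docstrings above. It assumes the
# `openai/clip-vit-base-patch32` checkpoint can be downloaded and that Pillow/requests are installed;
# because this module uses relative imports it is normally imported via the `transformers` package
# rather than executed directly, so treat this block as illustrative.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoProcessor

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
    )
    with torch.inference_mode():
        outputs = model(**inputs)

    # Softmax over the image-text similarity scores gives the zero-shot label probabilities.
    print(outputs.logits_per_image.softmax(dim=1))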