
    ht>                         d dl mZmZ d dlZddlmZ ddlmZm	Z	m
Z
 ddlmZmZmZmZmZ ddlmZmZ ddlmZ  G d	 d
ed      Z G d ded      Z G d de      ZdgZy)    )OptionalUnionN   )BatchFeature)
ImageInputconcatenate_listmake_flat_list_of_images)ImagesKwargsMultiModalDataProcessingKwargsProcessorMixinUnpack)PreTokenizedInput	TextInput)
VideoInputc                   >    e Zd ZU ee   ed<   ee   ed<   ee   ed<   y)InternVLImagesKwargscrop_to_patchesmin_patchesmax_patchesN)__name__
__module____qualname__r   bool__annotations__int     o/var/www/html/aiagenthome/venv/lib/python3.12/site-packages/transformers/models/internvl/processing_internvl.pyr   r      s     d^###r   r   F)totalc                   2    e Zd ZU eed<   dddddiddid	Zy
)InternVLProcessorKwargsimages_kwargsleftF)padding_sidereturn_mm_token_type_idsr   Treturn_tensorspt)text_kwargsr#   videos_kwargsN)r   r   r   r   r   	_defaultsr   r   r   r"   r"   !   s4    '' #(-

 t
 d
Ir   r"   c                   &    e Zd ZdZg dZdZdZdZ	 	 	 	 	 ddef fdZ	de
e   d	e
e   d
e
e   dej                  dej                  dej                  fdZ	 	 	 	 ddee   deeeee
e   e
e   f      dee   dee   def
dZddZed        Z xZS )InternVLProcessoraM  
    Constructs a InternVL processor which wraps a [`AutoImageProcessor`] and
    [`PretrainedTokenizerFast`] tokenizer into a single processor that inherits both the image processor and
    tokenizer functionalities. See the [`~InternVLProcessor.__call__`] and [`~InternVLProcessor.decode`] for more information.
    Args:
        image_processor ([`AutoImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`], *optional*):
            The tokenizer is a required input.
        video_processor ([`AutoVideoProcessor`], *optional*):
            The video processor is a required input.
        image_seq_length (`int`, *optional*, defaults to 256):
            The number of image token to use per image patch. it should be set so that:
            image_seq_length = (config.image_size // config.patch_size) ** 2 * (config.scale_factor**2)
        chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
            in a chat into a tokenizable string.
    )image_processor	tokenizervideo_processorAutoImageProcessorAutoVideoProcessorAutoTokenizerimage_seq_lengthc                 z   || _         |j                  | _        |j                  | _        |j                  | _        |j                  | _        |j
                  | _        |j                  | _        |j                  | _	        | j                  | j                  | j                  g| _
        t        | 0  |||fd|i| y )Nchat_template)r4   start_image_tokenend_image_tokenstart_image_token_idend_image_token_idcontext_image_tokenimage_tokenvideo_tokencontext_image_token_idimage_token_id	image_idssuper__init__)selfr.   r/   r0   r4   r6   kwargs	__class__s          r   rB   zInternVLProcessor.__init__I   s     !1!*!<!<(88$-$B$B!"+">">$88$00'>>--t/H/H$JaJab)_lTaleklr   textimage_num_patchesvideo_num_patchesimage_num_patches_indicesvideo_num_patches_indicesvideo_patch_indicesc	           	      0    d}	d}
g }g }g }|D ]  }|} j                   |v s j                  |v r j                   |v r؉ j                  |vs7|j                   j                         |j                   j                        k  r|	dkD  r||	dz
     nd}||	   }|j                  |||        |j	                   j                   dd      }|j                   j
                    j                    j                  z  ||	   z    j                          |	dz  }	n||
   }||
dz      }||   }||   }|j                  |||        t        |||       dj                   fdt        t                    D              }|j                  |       |j	                   j                  dd      }|
dz  }
 j                   |v r j                  |v rd|v r)|j                  d      }|j	                  d|d      }d|v r)|j                  |        |||	|
fS )z
        Processes interleaved text with <image> and <video> placeholders, replacing them with appropriate
        image and video tokens while keeping track of the patches used.
        r      z<placeholder>
c              3      K   | ]D  }d |dz    dj                    j                  j                  z  |   z   j                    F yw)FramerM   z: N)r7   r<   r4   r8   ).0inum_patchesrC   s     r   	<genexpr>z?InternVLProcessor._insert_media_placeholders.<locals>.<genexpr>   ss      -!8A  Awb)?)?(@AQAQTXTiTiAilwxylzAz@{  }A  }Q  }Q  |R  S!8s   A
A)r<   r=   indexappendreplacer7   r4   r8   listjoinrangelenpop)rC   rF   image_pixel_valuesvideo_pixel_valuesrG   rH   rI   rJ   rK   image_indexvideo_indexprocessed_textimage_video_patchesreplace_stringsprompt
new_promptstart_index	end_indexcurrent_patch_indexend_patch_indexvideo_promptreplace_strrS   s   `                     @r   _insert_media_placeholdersz,InternVLProcessor._insert_media_placeholders^   s      FJ""j0D4D4D
4R##z1$$J6!''(8(89J<L<LTM]M]<^^ Q\^_P_";K!O"LefK 9+ FI'../A+i/XY!+!3!3D4D4DoWX!YJ#**11243C3CdF[F[3[^op{^|3|2}  C  S  S  ~T  U  1$K
 +>k*J'&9+/&JO";<O"PK 9/ JI'../A+i/XY"&'89L_']"^K#'99 -!&s;'7!8- $L $**<8!+!3!3D4D4DoWX!YJ1$KA ""j0D4D4D
4RB "Z/-11!4'//aP
 "Z/ !!*-M P 2KLLr   imagesvideosrD   returnc           
      `   |t        d       | j                  t        fd| j                  j                  i|}t        |t        t        f      s|g}g }d}t        j                  dg      }	|t| j                  j                  |      }t        |      } | j                  dd|i|d   }
|
j                  d      }|
j                  d      }t        j                  |      }	g }d}t        j                  dg      }t        j                  dg      }||d	   } | j                  dd
|i|}|j                  d      }|j                   ^}}}t        j"                  ||      }t%        |      }t        j&                  |dz   t(              }d|d<   t        j                  |      |dd dg|z  }t        j&                  |dz   t(              }d|d<   t        j                  |      |dd |j+                  dd      }i }||`| j-                  ||||||	||      \  }}}}||t/        |      k7  rt        d      ||t/              k7  rt        d      dt1        |      i}|d   j                  dd      }|d   j                  dd      } | j                  |fi |d   }| j3                  ||dg       |rft        j                  |d         }t        j4                  |d         }d|t        j6                  || j8                        <   |j;                         |d<   t=        i |||      S )a	  
        Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] to encode the text if `text`
        is not `None`, otherwise encode default OCR queries which depends on the `format`, `box`, `color`, `multi_page` and
        `crop_to_patches` arguments. To prepare the vision inputs, this method forwards the `images` and `kwargs` arguments to
        GotOcr2ImageProcessor's [`~GotOcr2ImageProcessor.__call__`] if `images` is not `None`.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `list[str]`, `list[list[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
                The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
        NzYou have to specify text.tokenizer_init_kwargsr   rm   r#   rS   pixel_valuesr*   rn   pixel_values_videosrM   zONumber of image placeholders in the prompt does not match the number of images.zONumber of video placeholders in the prompt does not match the number of videos.r)   r'   r&   image)
modalities	input_idsmm_token_type_ids)datatensor_typer   )
ValueError_merge_kwargsr"   r/   init_kwargs
isinstancerX   tuplenparrayr.   fetch_imagesr	   r\   cumsumr0   shapefullsumemptyr   flattenrl   r[   r   _check_special_mm_tokens
zeros_likeisinr@   tolistr   )rC   rm   rF   audiorn   rD   output_kwargsrG   r]   rI   image_inputsrH   r^   rK   rJ   video_kwargsvideo_inputs
batch_size
num_frames_num_frames_per_videoimage_videos_inputsrb   r_   r`   r'   r&   text_inputs	array_idsrw   s                                 r   __call__zInternVLProcessor.__call__   s   R <899***#
"&.."<"<
 
 $u.6D !$&HHaSM!))66v>F-f5F/4//`v`A_`L , 0 0 ?!-!1!1.!A(*		2C(D%! hhsm$&HHaSM!(9L/4//NvNNL!-!1!12G!H);)A)A&J
Q#%77:z#B 12J"$((:>3"?%&"&(ii0D&E#!"j 0(*a(E%+,%a(,.II6G,H%ab)!3!;!;Aq!A !3BFBaBa""!!))#	C?D%{K !kS[&@ !rss!kS9M5N&N !rss $23CDW3X"Y&}599:JDQ#0#?#C#CD^`d#e $dnnTJ]=-IJ%%dKWI%N#[!9:I "k+.F GDEbggi@A/@/G/G/IK+,!GK!G3F!GUcddr   c                 R   i }|t         j                  j                  di       }|j                  |       |D cg c]   } | j                  j
                  g || " }}|D cg c]  }d| j                  |z  z    }}|j                  ||d       t        di |S c c}w c c}w )a  
        Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.

        Args:
            image_sizes (`list[list[int]]`, *optional*):
                The input sizes formatted as (height, width) per each image.

        Returns:
            `MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
            input modalities, along with other useful data.
        r#      )num_image_tokensnum_image_patchesr   )r"   r+   getupdater.   get_number_of_image_patchesr4   r   )	rC   image_sizesrD   vision_datar#   
image_sizer   rS   r   s	            r   _get_num_multimodal_tokensz,InternVLProcessor._get_num_multimodal_tokens  s     "3==AA/SUVM  ( #.!"-J A$$@@\*\m\"-  !
 ^oo]nkT%:%:[%H I]no4D[lmn,,,!
  ps   %B%B$c                 d    | j                   j                  }| j                  j                  }||z   S N)r/   model_input_namesr.   )rC   tokenizer_input_namesimage_processor_input_namess      r   r   z#InternVLProcessor.model_input_names1  s2     !% @ @&*&:&:&L&L#$'BBBr   )NNN   N)NNNNr   )r   r   r   __doc__
attributesimage_processor_classvideo_processor_classtokenizer_classr   rB   rX   strr   ndarrayrl   r   r   r   r   r   r   r   r"   r   r   r   propertyr   __classcell__)rE   s   @r   r-   r-   1   s;   $ EJ00%O  #m
 m*>M3i>M
  9>M  9>M $&::>M $&::>M  ZZ>MD (,hl'+ue$ue uY(94	?DQbLccdeue
 $ue 01ue 
uen-8 C Cr   r-   )typingr   r   numpyr   image_processing_utilsr   image_utilsr   r   r	   processing_utilsr
   r   r   r   r   tokenization_utils_baser   r   video_utilsr   r   r"   r-   __all__r   r   r   <module>r      sZ     #  2 Q Q f f C %<u .e  EC ECP 
r   