
from typing import Any, Union

from ..utils import add_end_docstrings, is_vision_available
from .base import GenericTensor, Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image


@add_end_docstrings(
    build_pipeline_init_args(has_image_processor=True),
    """
        image_processor_kwargs (`dict`, *optional*):
            Additional dictionary of keyword arguments passed along to the image processor, e.g.
            {"size": {"height": 100, "width": 100}}
        pool (`bool`, *optional*, defaults to `False`):
            Whether or not to return the pooled output. If `False`, the model will return the raw hidden states.
    """,
)
class ImageFeatureExtractionPipeline(Pipeline):
    """
    Image feature extraction pipeline uses no model head. This pipeline extracts the hidden states from the base
    transformer, which can be used as features in downstream tasks.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> extractor = pipeline(model="google/vit-base-patch16-224", task="image-feature-extraction")
    >>> result = extractor("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png", return_tensors=True)
    >>> result.shape  # This is a tensor of shape [1, sequence_length, hidden_dimension] representing the input image.
    torch.Size([1, 197, 768])
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial).

    This image feature extraction pipeline can currently be loaded from [`pipeline`] using the task identifier:
    `"image-feature-extraction"`.

    All vision models may be used for this pipeline. See a list of all models, including community-contributed models,
    on [huggingface.co/models](https://huggingface.co/models).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = False

    def _sanitize_parameters(self, image_processor_kwargs=None, return_tensors=None, pool=None, **kwargs):
        preprocess_params = {} if image_processor_kwargs is None else image_processor_kwargs

        postprocess_params = {}
        if pool is not None:
            postprocess_params["pool"] = pool
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]

        return preprocess_params, {}, postprocess_params

    def preprocess(self, image, timeout=None, **image_processor_kwargs) -> dict[str, GenericTensor]:
        image = load_image(image, timeout=timeout)
        model_inputs = self.image_processor(image, return_tensors=self.framework, **image_processor_kwargs)
        if self.framework == "pt":
            model_inputs = model_inputs.to(self.torch_dtype)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, pool=None, return_tensors=False):
        pool = pool if pool is not None else False

        if pool:
            if "pooler_output" not in model_outputs:
                raise ValueError(
                    "No pooled output was returned. Make sure the model has a `pooler` layer when using the `pool` option."
                )
            outputs = model_outputs["pooler_output"]
        else:
            # The first entry of the model output is the raw hidden states (last_hidden_state).
            outputs = model_outputs[0]

        if return_tensors:
            return outputs
        if self.framework == "pt":
            return outputs.tolist()
        elif self.framework == "tf":
            return outputs.numpy().tolist()

    def __call__(
        self, *args: Union[str, list[str], "Image.Image", list["Image.Image"]], **kwargs: Any
    ) -> Union[Any, list[Any]]:
        """
        Extract the features of the input(s).

        Args:
            images (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP links, all as local paths, or all as PIL images.
            timeout (`float`, *optional*, defaults to `None`):
                The maximum time in seconds to wait for fetching images from the web. If `None`, no timeout is used
                and the call may block forever.
        Return:
            A nested list of `float`: The features computed by the model.
        """
        return super().__call__(*args, **kwargs)