from typing import TYPE_CHECKING, Any, Union, overload

from ..utils import (
    add_end_docstrings,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from ..image_utils import load_image

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    )

if TYPE_CHECKING:
    from PIL import Image

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ObjectDetectionPipeline(Pipeline):
    """
Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects
and their classes.

Example:

```python
>>> from transformers import pipeline

>>> detector = pipeline(model="facebook/detr-resnet-50")
>>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
[{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}]

>>> # x, y are expressed relative to the top left-hand corner.
```
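
The detection `threshold` can also be passed at call time. A sketch (reusing `detector` from above; the
`0.9` value is illustrative, and output is omitted because the exact scores depend on the checkpoint):

```python
predictions = detector(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    threshold=0.9,  # keep only detections the model scores at or above 0.9
)
```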

Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"object-detection"`.

See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        # Object detection models proper, plus token-classification models (LayoutLM-style)
        # that classify OCR'd words with their boxes.
        mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "timeout" in kwargs:
            preprocess_params["timeout"] = kwargs["timeout"]
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return preprocess_params, {}, postprocess_kwargs

    @overload
    def __call__(self, image: Union[str, "Image.Image"], **kwargs: Any) -> list[dict[str, Any]]: ...

    @overload
    def __call__(
        self, image: Union[list[str], list["Image.Image"]], **kwargs: Any
    ) -> list[list[dict[str, Any]]]: ...

    def __call__(self, *args, **kwargs) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
        """
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.

Args:
    inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
        The pipeline handles three types of images:

        - A string containing an HTTP(S) link pointing to an image
        - A string containing a local path to an image
        - An image loaded in PIL directly

        The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
        same format: all as HTTP(S) links, all as local paths, or all as PIL images.
    threshold (`float`, *optional*, defaults to 0.5):
        The probability necessary to make a prediction.
    timeout (`float`, *optional*, defaults to None):
        The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
        the call may block forever.

Return:
    A list of dictionaries or a list of lists of dictionaries containing the result. If the input is a single
    image, a list of dictionaries is returned; if the input is a list of several images, a list of lists of
    dictionaries is returned, one inner list per image.

    The dictionaries contain the following keys:

    - **label** (`str`) -- The class label identified by the model.
    - **score** (`float`) -- The score attributed by the model for that label.
    - **box** (`dict[str, int]`) -- The bounding box of the detected object in the image's original size.
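
    Example of a batched call (a sketch; `detector` is an instance of this pipeline, and the same demo
    image is passed twice just to show the batched output shape):

    ```python
    urls = [
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    ]
    batched = detector(urls, threshold=0.5, timeout=30.0)
    # batched is a list with one entry per image; each entry is a list of
    # {"score": float, "label": str, "box": {"xmin": int, ...}} dictionaries.
    ```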
imagesinputs)popr   r7   )r#   r$   r%   r   s      r'   r7   r8   T   s=    @ v(&"8%zz(3F8w000r)   c                 :   [        XS9n[        R                  " UR                  UR                  //5      nU R                  U/SS9nU R                  S:X  a  UR                  U R                  5      nU R                  b  U R                  US   US   SS9nX4S'   U$ )N)r+   pt)r;   return_tensorswordsboxes)textrB   r@   target_size)
r   torch	IntTensorheightwidthimage_processorr   totorch_dtype	tokenizer)r#   r2   r+   rD   r<   s        r'   
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.5):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetection model.
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], "labels": [...], "boxes": [...]} --> [{"score": x, "label": y, "box": z}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> dict[str, int]:
        """
Turns a list [xmin, ymin, xmax, ymax] into a dict { "xmin": xmin, ... }

Args:
    box (`torch.Tensor`): Tensor containing the coordinates in corners format.

Returns:
    bbox (`dict[str, int]`): Dict containing the coordinates in corners format.
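
Example (a sketch; `pipe` stands for an instantiated `ObjectDetectionPipeline`, and the coordinate
values are illustrative):

```python
import torch

box = torch.tensor([69.4, 171.9, 396.0, 507.2])  # corners format: xmin, ymin, xmax, ymax
pipe._get_bounding_box(box)
# -> {"xmin": 69, "ymin": 171, "xmax": 396, "ymax": 507}  (coordinates truncated to ints)
```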
        """
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox