from typing import Any, Union, overload

import numpy as np

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import Pipeline, build_pipeline_init_args


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import (
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
        MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES,
    )

logger = logging.get_logger(__name__)


@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ImageSegmentationPipeline(Pipeline):
    """
 f dedeeeeef  eeeeef   f f fddZdddZdd Z	dddZ  ZS )ImageSegmentationPipelinea  
    Image segmentation pipeline using any `AutoModelForXXXSegmentation`. This pipeline predicts masks of objects and
    their classes.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> segmenter = pipeline(model="facebook/detr-resnet-50-panoptic")
    >>> segments = segmenter("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
    >>> len(segments)
    2

    >>> segments[0]["label"]
    'bird'

    >>> segments[1]["label"]
    'bird'

    >>> type(segments[0]["mask"])  # This is a black and white mask showing where the bird is in the original image.
    <class 'PIL.Image.Image'>

    >>> segments[0]["mask"].size
    (768, 512)
    ```


    This image segmentation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"image-segmentation"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=image-segmentation).
    """

    _load_processor = False
    _load_image_processor = True
    _load_feature_extractor = False
    _load_tokenizer = None

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES.copy()
        mapping.update(MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES)
        mapping.update(MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES)
        self.check_model_type(mapping)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        if "subtask" in kwargs:
            postprocess_kwargs["subtask"] = kwargs["subtask"]
            preprocess_kwargs["subtask"] = kwargs["subtask"]
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        if "mask_threshold" in kwargs:
            postprocess_kwargs["mask_threshold"] = kwargs["mask_threshold"]
        if "overlap_mask_area_threshold" in kwargs:
            postprocess_kwargs["overlap_mask_area_threshold"] = kwargs["overlap_mask_area_threshold"]
        if "timeout" in kwargs:
            preprocess_kwargs["timeout"] = kwargs["timeout"]
        return preprocess_kwargs, {}, postprocess_kwargs

    @overload
    def __call__(self, inputs: Union[str, "Image.Image"], **kwargs: Any) -> list[dict[str, Any]]: ...

    @overload
    def __call__(
        self, inputs: Union[list[str], list["Image.Image"]], **kwargs: Any
    ) -> list[list[dict[str, Any]]]: ...

    def __call__(
        self,
        inputs: Union[str, "Image.Image", list[str], list["Image.Image"]] = None,
        **kwargs: Any,
    ) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
        """
        Perform segmentation (detect masks & classes) in the image(s) passed as inputs.

        Args:
            inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
                The pipeline handles three types of images:

                - A string containing an HTTP(S) link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
                same format: all as HTTP(S) links, all as local paths, or all as PIL images.
            subtask (`str`, *optional*):
                Segmentation task to be performed, one of [`semantic`, `instance` and `panoptic`] depending on model
                capabilities. If not set, the pipeline will attempt to resolve one in the following order:
                  `panoptic`, `instance`, `semantic`.
            threshold (`float`, *optional*, defaults to 0.9):
                Probability threshold to filter out predicted masks.
            mask_threshold (`float`, *optional*, defaults to 0.5):
                Threshold to use when turning the predicted masks into binary values.
            overlap_mask_area_threshold (`float`, *optional*, defaults to 0.5):
                Mask overlap threshold to eliminate small, disconnected segments.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            If the input is a single image, returns a list of dictionaries; if the input is a list of several images,
            returns a list of lists of dictionaries, one list per image.

            The dictionaries contain the mask, label and score (where applicable) of each detected object and have
            the following keys:

            - **label** (`str`) -- The class label identified by the model.
            - **mask** (`PIL.Image`) -- A binary mask of the detected object as a PIL Image of shape (width, height) of
              the original image. Returns a mask filled with zeros if no object is found.
            - **score** (*optional* `float`) -- The confidence score of the detection, when the model is capable of
              estimating a confidence for the "object" described by the label and the mask.
        imagesNzICannot call the image-classification pipeline without an inputs argument!)popr   r   r4   r3   r$   r%   r&   r4   h   s
   ,
c                 C   s   t ||d}|j|jfg}| jjjjdkrL|d u ri }nd|gi}| jd
|gdd|}| jdkr8|	| j
}| j|d d| jjj| jdd |d< n| j|gdd}| jdkr_|	| j
}||d	< |S )N)r+   OneFormerConfigtask_inputspt)r6   return_tensors
max_length)paddingr<   r;   	input_idstarget_sizer%   )r   heightwidthmodelconfigr   __name__image_processorr   todtype	tokenizertask_seq_len)r    imager'   r+   r?   r"   r/   r%   r%   r&   
preprocess   s,   



z$ImageSegmentationPipeline.preprocessc                 C   s&   | d}| jdi |}||d< |S )Nr?   r%   )r7   rB   )r    model_inputsr?   model_outputsr%   r%   r&   _forward   s   
z"ImageSegmentationPipeline._forward?      ?c                 C   s  d }|dv rt | jdr| jj}n|dv rt | jdr| jj}|d urj||||||d dd }g }|d }	|d	 D ]-}
|	|
d
 kd }tj| tj	dd}| j
jj|
d  }|
d }||||d q:|S |dv rt | jdr| jj||d dd }g }| }	t|	}|D ]#}|	|kd }tj|tj	dd}| j
jj| }|d ||d q|S td| dt| j
 )N>   Npanoptic"post_process_panoptic_segmentation>   Ninstance"post_process_instance_segmentationr?   )r(   r)   r*   target_sizesr   segmentationsegments_infoid   L)modelabel_idscore)r]   labelmask>   Nsemantic"post_process_semantic_segmentation)rU   zSubtask z is not supported for model )hasattrrE   rR   rT   r   	fromarraynumpyastypenpuint8rB   rC   id2labelappendra   uniquer   type)r    rM   r'   r(   r)   r*   fnoutputs
annotationrV   segmentr_   r^   r]   labelsr%   r%   r&   postprocess   sP   

z%ImageSegmentationPipeline.postprocess)NN)NrO   rP   rP   )rD   
__module____qualname____doc___load_processor_load_image_processor_load_feature_extractor_load_tokenizerr   r.   r   r   strr   listdictr4   rK   rN   rq   __classcell__r%   r%   r$   r&   r      s.    #,8&
2r   )typingr   r   r   rd   rf   utilsr   r   r   r	   r
   baser   r   PILr   image_utilsr   models.auto.modeling_autor   r   r   r   
get_loggerrD   loggerr   r%   r%   r%   r&   <module>   s    

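

# A minimal usage sketch, kept out of the library code path behind a __main__ guard.
# Assumptions (not guaranteed by this module): network access to the Hugging Face Hub
# and the `facebook/detr-resnet-50-panoptic` checkpoint from the class docstring.
if __name__ == "__main__":
    from transformers import pipeline

    segmenter = pipeline(task="image-segmentation", model="facebook/detr-resnet-50-panoptic")
    segments = segmenter(
        "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
        subtask="panoptic",  # or "instance" / "semantic", depending on model capabilities
        threshold=0.9,
        mask_threshold=0.5,
        overlap_mask_area_threshold=0.5,
    )
    for segment in segments:
        # Each entry has a label, an optional confidence score, and a PIL mask.
        print(segment["label"], segment["score"], segment["mask"].size)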