import math
from collections.abc import Iterable
from typing import TYPE_CHECKING, Optional, Union

from transformers.image_processing_base import BatchFeature
from transformers.image_transforms import group_images_by_shape, reorder_images
from transformers.models.beit.image_processing_beit_fast import BeitImageProcessorFast

from ...image_processing_utils_fast import DefaultFastImageProcessorKwargs
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    PILImageResampling,
    SizeDict,
)
from ...utils import (
    TensorType,
    auto_docstring,
    is_torch_available,
    is_torchvision_available,
    is_torchvision_v2_available,
    requires_backends,
)


if TYPE_CHECKING:
    from ...modeling_outputs import DepthEstimatorOutput

if is_torch_available():
    import torch

if is_torchvision_v2_available():
    from torchvision.transforms.v2 import functional as F
elif is_torchvision_available():
    from torchvision.transforms import functional as F


def get_resize_output_image_size(
    input_image: "torch.Tensor",
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> SizeDict:
    def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Snap to the nearest multiple, then fall back to floor/ceil if the
        # rounded value lands outside the allowed [min_val, max_val] range.
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    input_height, input_width = input_image.shape[-2:]
    output_height, output_width = output_size

    # Determine the scale factor that maps each input dimension onto the target.
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # Scale as little as possible: reuse the factor closer to 1 for both dimensions.
        if abs(1 - scale_width) < abs(1 - scale_height):
            # Fit width.
            scale_height = scale_width
        else:
            # Fit height.
            scale_width = scale_height

    new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)

    return SizeDict(height=new_height, width=new_width)
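
# Worked example (comments only, grounded in the arithmetic above): resizing a
# 480x640 image to a 384x384 target with keep_aspect_ratio=True and multiple=32.
# scale_height = 384/480 = 0.8 and scale_width = 384/640 = 0.6; since
# |1 - 0.6| > |1 - 0.8|, the height scale wins and both dimensions use 0.8,
# giving 384x512 after constraining to multiples of 32:
#
#     get_resize_output_image_size(
#         torch.rand(3, 480, 640), output_size=(384, 384), keep_aspect_ratio=True, multiple=32
#     )
#     # -> SizeDict(height=384, width=512)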


class DPTFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
    """
    ensure_multiple_of (`int`, *optional*, defaults to 1):
        If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Can be overridden
        by `ensure_multiple_of` in `preprocess`.
    do_pad (`bool`, *optional*, defaults to `False`):
        Whether to apply center padding. This was introduced in the DINOv2 paper, which uses the model in
        combination with DPT.
    size_divisor (`int`, *optional*):
        If `do_pad` is `True`, pads the image dimensions to be divisible by this value. This was introduced in the
        DINOv2 paper, which uses the model in combination with DPT.
    keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
        If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved. Can
        be overridden by `keep_aspect_ratio` in `preprocess`.
    do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
        is used for background, and background itself is not included in all classes of a dataset (e.g.
        ADE20k). The background label will be replaced by 255.
    """

    ensure_multiple_of: Optional[int]
    size_divisor: Optional[int]
    do_pad: Optional[bool]
    keep_aspect_ratio: Optional[bool]
    do_reduce_labels: Optional[bool]
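
# Hypothetical call sketch (comments only; `processor` and the argument values
# are illustrative): these kwargs can be overridden per call, e.g.
#
#     processor = DPTImageProcessorFast()
#     processor(images, keep_aspect_ratio=True, ensure_multiple_of=32,
#               do_pad=True, size_divisor=32, return_tensors="pt")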


@auto_docstring
class DPTImageProcessorFast(BeitImageProcessorFast):
    resample = PILImageResampling.BICUBIC
    image_mean = IMAGENET_STANDARD_MEAN
    image_std = IMAGENET_STANDARD_STD
    size = {"height": 384, "width": 384}
    do_resize = True
    do_rescale = True
    rescale_factor = 1 / 255
    do_normalize = True
    do_pad = False
    size_divisor = None
    do_reduce_labels = False
    ensure_multiple_of = 1
    keep_aspect_ratio = False
    valid_kwargs = DPTFastImageProcessorKwargs

    def resize(
        self,
        image: "torch.Tensor",
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"] = None,
        antialias: bool = True,
        ensure_multiple_of: Optional[int] = 1,
        keep_aspect_ratio: bool = False,
    ) -> "torch.Tensor":
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`torch.Tensor`):
                Image to resize.
            size (`SizeDict`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
                `InterpolationMode` filter to use when resizing the image, e.g. `InterpolationMode.BICUBIC`.
            antialias (`bool`, *optional*, defaults to `True`):
                Whether to use antialiasing when resizing the image.
            ensure_multiple_of (`int`, *optional*):
                If `do_resize` is `True`, the image is resized to a size that is a multiple of this value.
            keep_aspect_ratio (`bool`, *optional*, defaults to `False`):
                If `True`, and `do_resize` is `True`, the image is resized to the largest possible size such that
                the aspect ratio is preserved.

        Returns:
            `torch.Tensor`: The resized image.
        """
        if not size.height or not size.width:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        new_size = get_resize_output_image_size(
            image,
            output_size=(size.height, size.width),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return super().resize(image, new_size, interpolation=interpolation, antialias=antialias)
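
    # Worked sketch (comments only): with the class defaults above, a 480x640
    # input and size={"height": 384, "width": 384} resizes to exactly 384x384,
    # since keep_aspect_ratio=False and ensure_multiple_of=1 leave the target
    # unchanged. With keep_aspect_ratio=True and ensure_multiple_of=32 the same
    # input becomes 384x512 (see the worked example under
    # get_resize_output_image_size).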

    def pad_image(
        self,
        image: "torch.Tensor",
        size_divisor: int = 1,
    ) -> "torch.Tensor":
        """
        Center pad a batch of images to be a multiple of `size_divisor`.

        Args:
            image (`torch.Tensor`):
                Image to pad. Can be a batch of images of dimensions (N, C, H, W) or a single image of dimensions
                (C, H, W).
            size_divisor (`int`):
                The width and height of the image will be padded to a multiple of this number.
        """
        height, width = image.shape[-2:]

        def _get_pad(size, size_divisor):
            # Round the dimension up to the next multiple and split the slack evenly,
            # e.g. size=480 with size_divisor=64 gives new_size=512 and pads of 16/16.
            new_size = math.ceil(size / size_divisor) * size_divisor
            pad_size = new_size - size
            pad_size_left = pad_size // 2
            pad_size_right = pad_size - pad_size_left
            return pad_size_left, pad_size_right

        pad_top, pad_bottom = _get_pad(height, size_divisor)
        pad_left, pad_right = _get_pad(width, size_divisor)
        padding = (pad_left, pad_top, pad_right, pad_bottom)
        return F.pad(image, padding)

    def _preprocess(
        self,
        images: list["torch.Tensor"],
        do_reduce_labels: bool,
        do_resize: bool,
        size: SizeDict,
        interpolation: Optional["F.InterpolationMode"],
        do_center_crop: bool,
        crop_size: SizeDict,
        do_rescale: bool,
        rescale_factor: float,
        do_normalize: bool,
        image_mean: Optional[Union[float, list[float]]],
        image_std: Optional[Union[float, list[float]]],
        keep_aspect_ratio: bool,
        ensure_multiple_of: Optional[int],
        do_pad: bool,
        size_divisor: Optional[int],
        disable_grouping: Optional[bool],
        return_tensors: Optional[Union[str, TensorType]],
        **kwargs,
    ) -> BatchFeature:
        if do_reduce_labels:
            images = self.reduce_label(images)

        # Group images by shape so that same-sized images are resized as one batch.
        grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
        resized_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_resize:
                stacked_images = self.resize(
                    image=stacked_images,
                    size=size,
                    interpolation=interpolation,
                    ensure_multiple_of=ensure_multiple_of,
                    keep_aspect_ratio=keep_aspect_ratio,
                )
            resized_images_grouped[shape] = stacked_images
        resized_images = reorder_images(resized_images_grouped, grouped_images_index)

        # Regroup: resizing may produce new shapes when keep_aspect_ratio or
        # ensure_multiple_of is used, or when do_resize is False.
        grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
        processed_images_grouped = {}
        for shape, stacked_images in grouped_images.items():
            if do_center_crop:
                stacked_images = self.center_crop(stacked_images, crop_size)
            if do_pad:
                stacked_images = self.pad_image(stacked_images, size_divisor)
            # Fused rescale and normalize.
            stacked_images = self.rescale_and_normalize(
                stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
            )
            processed_images_grouped[shape] = stacked_images

        processed_images = reorder_images(processed_images_grouped, grouped_images_index)
        processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images

        return BatchFeature(data={"pixel_values": processed_images})

    def post_process_depth_estimation(
        self,
        outputs: "DepthEstimatorOutput",
        target_sizes: Optional[Union[TensorType, list[tuple[int, int]]]] = None,
    ) -> list[dict[str, TensorType]]:
        """
        Converts the raw output of [`DepthEstimatorOutput`] into final depth predictions and depth PIL images.
        Only supports PyTorch.

        Args:
            outputs ([`DepthEstimatorOutput`]):
                Raw outputs of the model.
            target_sizes (`TensorType` or `List[Tuple[int, int]]`, *optional*):
                Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size
                (height, width) of each image in the batch. If left to None, predictions will not be resized.

        Returns:
            `List[Dict[str, TensorType]]`: A list of dictionaries of tensors representing the processed depth
            predictions.
        """
        requires_backends(self, "torch")

        predicted_depth = outputs.predicted_depth

        if (target_sizes is not None) and (len(predicted_depth) != len(target_sizes)):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the predicted depth"
            )

        results = []
        target_sizes = [None] * len(predicted_depth) if target_sizes is None else target_sizes
        for depth, target_size in zip(predicted_depth, target_sizes):
            if target_size is not None:
                # `interpolate` expects (N, C, H, W): add batch and channel dims,
                # resize with bicubic interpolation, then squeeze them back out.
                depth = torch.nn.functional.interpolate(
                    depth.unsqueeze(0).unsqueeze(1), size=target_size, mode="bicubic", align_corners=False
                ).squeeze()
            results.append({"predicted_depth": depth})

        return results


__all__ = ["DPTImageProcessorFast"]
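

# Usage sketch (illustrative only; assumes torch and torchvision are installed
# and that default construction without a checkpoint is acceptable). Reachable
# via `python -m transformers.models.dpt.modular_dpt`; the random tensors stand
# in for a real image and a real model's depth head output.
if __name__ == "__main__":
    from transformers.modeling_outputs import DepthEstimatorOutput

    processor = DPTImageProcessorFast()
    image = torch.randint(0, 256, (3, 480, 640), dtype=torch.uint8)
    inputs = processor(images=image, return_tensors="pt")
    print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 384, 384]) with the defaults above

    # Pretend the model predicted a 384x384 depth map, then resize it back to 480x640.
    fake_outputs = DepthEstimatorOutput(predicted_depth=torch.rand(1, 384, 384))
    results = processor.post_process_depth_estimation(fake_outputs, target_sizes=[(480, 640)])
    print(results[0]["predicted_depth"].shape)  # torch.Size([480, 640])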