
import base64
import os
from io import BytesIO
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union

import numpy as np
import requests
from packaging import version

from .utils import (
    ExplicitEnum,
    is_jax_tensor,
    is_numpy_array,
    is_tf_tensor,
    is_torch_available,
    is_torch_tensor,
    is_torchvision_available,
    is_vision_available,
    logging,
    requires_backends,
    to_numpy,
)
from .utils.constants import (  # noqa: F401
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
)


if is_vision_available():
    import PIL.Image
    import PIL.ImageOps

    if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
        PILImageResampling = PIL.Image.Resampling
    else:
        PILImageResampling = PIL.Image

    if is_torchvision_available():
        from torchvision.transforms import InterpolationMode

        pil_torch_interpolation_mapping = {
            PILImageResampling.NEAREST: InterpolationMode.NEAREST,
            PILImageResampling.BOX: InterpolationMode.BOX,
            PILImageResampling.BILINEAR: InterpolationMode.BILINEAR,
            PILImageResampling.HAMMING: InterpolationMode.HAMMING,
            PILImageResampling.BICUBIC: InterpolationMode.BICUBIC,
            PILImageResampling.LANCZOS: InterpolationMode.LANCZOS,
        }


if TYPE_CHECKING:
    if is_torch_available():
        import torch


logger = logging.get_logger(__name__)


ImageInput = Union[
    "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"]
]  # noqa

VideoInput = Union[
    List["PIL.Image.Image"],
    "np.ndarray",
    "torch.Tensor",
    List["np.ndarray"],
    List["torch.Tensor"],
    List[List["PIL.Image.Image"]],
    List[List["np.ndarray"]],
    List[List["torch.Tensor"]],
]  # noqa


class ChannelDimension(ExplicitEnum):
    FIRST = "channels_first"
    LAST = "channels_last"


class AnnotationFormat(ExplicitEnum):
    COCO_DETECTION = "coco_detection"
    COCO_PANOPTIC = "coco_panoptic"


class AnnotionFormat(ExplicitEnum):
    COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
    COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value


AnnotationType = Dict[str, Union[int, str, List[Dict]]]


def is_pil_image(img):
    return is_vision_available() and isinstance(img, PIL.Image.Image)


class ImageType(ExplicitEnum):
    PIL = "pillow"
    TORCH = "torch"
    NUMPY = "numpy"
    TENSORFLOW = "tensorflow"
    JAX = "jax"


def get_image_type(image):
    if is_pil_image(image):
        return ImageType.PIL
    if is_torch_tensor(image):
        return ImageType.TORCH
    if is_numpy_array(image):
        return ImageType.NUMPY
    if is_tf_tensor(image):
        return ImageType.TENSORFLOW
    if is_jax_tensor(image):
        return ImageType.JAX
    raise ValueError(f"Unrecognised image type {image}")


def is_valid_image(img):
    return is_pil_image(img) or is_numpy_array(img) or is_torch_tensor(img) or is_tf_tensor(img) or is_jax_tensor(img)


def valid_images(imgs):
    # If we have a list of images, make sure every image is valid
    if isinstance(imgs, (list, tuple)):
        for img in imgs:
            if not valid_images(img):
                return False
    # If not a list or tuple, we have been given a single image or batched tensor of images
    elif not is_valid_image(imgs):
        return False
    return True


def is_batched(img):
    if isinstance(img, (list, tuple)):
        return is_valid_image(img[0])
    return False


def is_scaled_image(image: np.ndarray) -> bool:
    """
    Checks to see whether the pixel values have already been rescaled to [0, 1].
    """
    if image.dtype == np.uint8:
        return False

    # It's possible the image has pixel values in [0, 255] but is of floating type
    return np.min(image) >= 0 and np.max(image) <= 1


def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:
    """
    Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.
    If the input is a batch of images, it is converted to a list of images.

    Args:
        images (`ImageInput`):
            Image or images to turn into a list of images.
        expected_ndims (`int`, *optional*, defaults to 3):
            Expected number of dimensions for a single input image. If the input image has a different number of
            dimensions, an error is raised.
    """
    if is_batched(images):
        return images

    # A PIL image is never batched
    if isinstance(images, PIL.Image.Image):
        return [images]

    if is_valid_image(images):
        if images.ndim == expected_ndims + 1:
            # Batch of images
            images = list(images)
        elif images.ndim == expected_ndims:
            # Single image
            images = [images]
        else:
            raise ValueError(
                f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got"
                f" {images.ndim} dimensions."
            )
        return images
    raise ValueError(
        "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
        f"jax.ndarray, but got {type(images)}."
    )


def to_numpy_array(img) -> np.ndarray:
    if not is_valid_image(img):
        raise ValueError(f"Invalid image type: {type(img)}")

    if is_vision_available() and isinstance(img, PIL.Image.Image):
        return np.array(img)
    return to_numpy(img)


def infer_channel_dimension_format(
    image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None
) -> ChannelDimension:
    """
    Infers the channel dimension format of `image`.

    Args:
        image (`np.ndarray`):
            The image to infer the channel dimension of.
        num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
            The number of channels of the image.

    Returns:
        The channel dimension of the image.
    """
    num_channels = num_channels if num_channels is not None else (1, 3)
    num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels

    if image.ndim == 3:
        first_dim, last_dim = 0, 2
    elif image.ndim == 4:
        first_dim, last_dim = 1, 3
    else:
        raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")

    if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:
        logger.warning(
            f"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first"
            " dimension."
        )
        return ChannelDimension.FIRST
    elif image.shape[first_dim] in num_channels:
        return ChannelDimension.FIRST
    elif image.shape[last_dim] in num_channels:
        return ChannelDimension.LAST
    raise ValueError("Unable to infer channel dimension format")


def get_channel_dimension_axis(
    image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None
) -> int:
    """
    Returns the channel dimension axis of the image.

    Args:
        image (`np.ndarray`):
            The image to get the channel dimension axis of.
        input_data_format (`ChannelDimension` or `str`, *optional*):
            The channel dimension format of the image. If `None`, will infer the channel dimension from the image.

    Returns:
        The channel dimension axis of the image.
    """
    if input_data_format is None:
        input_data_format = infer_channel_dimension_format(image)
    if input_data_format == ChannelDimension.FIRST:
        return image.ndim - 3
    elif input_data_format == ChannelDimension.LAST:
        return image.ndim - 1
    raise ValueError(f"Unsupported data format: {input_data_format}")


def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:
    """
    Returns the (height, width) dimensions of the image.

    Args:
        image (`np.ndarray`):
            The image to get the dimensions of.
        channel_dim (`ChannelDimension`, *optional*):
            Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.

    Returns:
        A tuple of the image's height and width.
    """
    if channel_dim is None:
        channel_dim = infer_channel_dimension_format(image)

    if channel_dim == ChannelDimension.FIRST:
        return image.shape[-2], image.shape[-1]
    elif channel_dim == ChannelDimension.LAST:
        return image.shape[-3], image.shape[-2]
    else:
        raise ValueError(f"Unsupported data format: {channel_dim}")


def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]]) -> bool:
    if (
        isinstance(annotation, dict)
        and "image_id" in annotation
        and "annotations" in annotation
        and isinstance(annotation["annotations"], (list, tuple))
        and (
            # an image can have no annotations
            len(annotation["annotations"]) == 0
            or isinstance(annotation["annotations"][0], dict)
        )
    ):
        return True
    return False


def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) -> bool:
    if (
        isinstance(annotation, dict)
        and "image_id" in annotation
        and "segments_info" in annotation
        and "file_name" in annotation
        and isinstance(annotation["segments_info"], (list, tuple))
        and (
            # an image can have no segments
            len(annotation["segments_info"]) == 0
            or isinstance(annotation["segments_info"][0], dict)
        )
    ):
        return True
    return False


def valid_coco_detection_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
    return all(is_valid_annotation_coco_detection(ann) for ann in annotations)


def valid_coco_panoptic_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
    return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations)


def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image":
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
        timeout (`float`, *optional*):
            The timeout value in seconds for the URL request.

    Returns:
        `PIL.Image.Image`: A PIL Image.
    """
    requires_backends(load_image, ["vision"])
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            if image.startswith("data:image/"):
                image = image.split(",")[1]

            # Try to load as a base64-encoded string
            try:
                b64 = base64.decodebytes(image.encode())
                image = PIL.Image.open(BytesIO(b64))
            except Exception as e:
                raise ValueError(
                    "Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path"
                    f" to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
                )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise TypeError(
            "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path,"
            " or a PIL image."
        )
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


def validate_preprocess_arguments(
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    do_pad: Optional[bool] = None,
    size_divisibility: Optional[int] = None,
    do_center_crop: Optional[bool] = None,
    crop_size: Optional[Dict[str, int]] = None,
    do_resize: Optional[bool] = None,
    size: Optional[Dict[str, int]] = None,
    resample: Optional["PILImageResampling"] = None,
):
    """
    Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.
    Raises `ValueError` if arguments incompatibility is caught.
    Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,
    sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow
    existing arguments when possible.
    """
    if do_rescale and rescale_factor is None:
        raise ValueError("`rescale_factor` must be specified if `do_rescale` is `True`.")

    if do_pad and size_divisibility is None:
        # Here, size_divisor might be passed as the value of size
        raise ValueError(
            "Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if"
            " `do_pad` is `True`."
        )

    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.")

    if do_center_crop and crop_size is None:
        raise ValueError("`crop_size` must be specified if `do_center_crop` is `True`.")

    if do_resize and (size is None or resample is None):
        raise ValueError("`size` and `resample` must be specified if `do_resize` is `True`.")


class ImageFeatureExtractionMixin:
    """
    Mixin that contains utilities for preparing image features.
    """

    def _ensure_format_supported(self, image):
        if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
            raise ValueError(
                f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and"
                " `torch.Tensor` are."
            )

    def to_pil_image(self, image, rescale=None):
        """
        Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
        needed.

        Args:
            image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
                The image to convert to the PIL Image format.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
                default to `True` if the image type is a floating type, `False` otherwise.
        """
        self._ensure_format_supported(image)

        if is_torch_tensor(image):
            image = image.numpy()

        if isinstance(image, np.ndarray):
            if rescale is None:
                # rescale defaults to the array being of floating type.
                rescale = isinstance(image.flat[0], np.floating)
            # If the channel has been moved to first dim, we put it back at the end.
            if image.ndim == 3 and image.shape[0] in [1, 3]:
                image = image.transpose(1, 2, 0)
            if rescale:
                image = image * 255
            image = image.astype(np.uint8)
            return PIL.Image.fromarray(image)
        return image

    def convert_rgb(self, image):
        """
        Converts `PIL.Image.Image` to RGB format.

        Args:
            image (`PIL.Image.Image`):
                The image to convert.
        """
        self._ensure_format_supported(image)
        if not isinstance(image, PIL.Image.Image):
            return image

        return image.convert("RGB")

    def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
        """
        Rescale a numpy image by scale amount
        """
        self._ensure_format_supported(image)
        return image * scale

    def to_numpy_array(self, image, rescale=None, channel_first=True):
        """
        Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
        dimension.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to convert to a NumPy array.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
                default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
            channel_first (`bool`, *optional*, defaults to `True`):
                Whether or not to permute the dimensions of the image to put the channel dimension first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = np.array(image)

        if is_torch_tensor(image):
            image = image.numpy()

        rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale

        if rescale:
            image = self.rescale(image.astype(np.float32), 1 / 255.0)

        if channel_first and image.ndim == 3:
            image = image.transpose(2, 0, 1)

        return image

    def expand_dims(self, image):
        """
        Expands 2-dimensional `image` to 3 dimensions.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to expand.
        """
        self._ensure_format_supported(image)

        # Do nothing if PIL image
        if isinstance(image, PIL.Image.Image):
            return image

        if is_torch_tensor(image):
            image = image.unsqueeze(0)
        else:
            image = np.expand_dims(image, axis=0)
        return image

    def normalize(self, image, mean, std, rescale=False):
        """
        Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
        if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to normalize.
            mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
                The mean (per channel) to use for normalization.
            std (`List[float]` or `np.ndarray` or `torch.Tensor`):
                The standard deviation (per channel) to use for normalization.
            rescale (`bool`, *optional*, defaults to `False`):
                Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
                happen automatically.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image, rescale=True)
        # If the input image is a PIL image, it automatically gets rescaled. If it's another type it may need
        # rescaling.
        elif rescale:
            if isinstance(image, np.ndarray):
                image = self.rescale(image.astype(np.float32), 1 / 255.0)
            elif is_torch_tensor(image):
                image = self.rescale(image.float(), 1 / 255.0)

        if isinstance(image, np.ndarray):
            if not isinstance(mean, np.ndarray):
                mean = np.array(mean).astype(image.dtype)
            if not isinstance(std, np.ndarray):
                std = np.array(std).astype(image.dtype)
        elif is_torch_tensor(image):
            import torch

            if not isinstance(mean, torch.Tensor):
                if isinstance(mean, np.ndarray):
                    mean = torch.from_numpy(mean)
                else:
                    mean = torch.tensor(mean)
            if not isinstance(std, torch.Tensor):
                if isinstance(std, np.ndarray):
                    std = torch.from_numpy(std)
                else:
                    std = torch.tensor(std)

        if image.ndim == 3 and image.shape[0] in [1, 3]:
            return (image - mean[:, None, None]) / std[:, None, None]
        else:
            return (image - mean) / std

    def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
        """
        Resizes `image`. Enforces conversion of input to PIL.Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to resize.
            size (`int` or `Tuple[int, int]`):
                The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
                matched to this.

                If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
                `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
                this number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
            resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                The filter to use for resampling.
            default_to_square (`bool`, *optional*, defaults to `True`):
                How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
                square (`size`,`size`). If set to `False`, will replicate
                [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
                with support for resizing only the smallest edge and providing an optional `max_size`.
            max_size (`int`, *optional*, defaults to `None`):
                The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
                greater than `max_size` after being resized according to `size`, then the image is resized again so
                that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
                edge may be shorter than `size`. Only used if `default_to_square` is `False`.

        Returns:
            image: A resized `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PILImageResampling.BILINEAR

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        if isinstance(size, list):
            size = tuple(size)

        if isinstance(size, int) or len(size) == 1:
            if default_to_square:
                size = (size, size) if isinstance(size, int) else (size[0], size[0])
            else:
                width, height = image.size
                # specified size only for the smallest edge
                short, long = (width, height) if width <= height else (height, width)
                requested_new_short = size if isinstance(size, int) else size[0]

                if short == requested_new_short:
                    return image

                new_short, new_long = requested_new_short, int(requested_new_short * long / short)

                if max_size is not None:
                    if max_size <= requested_new_short:
                        raise ValueError(
                            f"max_size = {max_size} must be strictly greater than the requested size for the smaller"
                            f" edge size = {size}"
                        )
                    if new_long > max_size:
                        new_short, new_long = int(max_size * new_short / new_long), max_size

                size = (new_short, new_long) if width <= height else (new_long, new_short)

        return image.resize(size, resample=resample)

    def center_crop(self, image, size):
        """
        Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
        size given, it will be padded (so the returned result has the size asked).

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
                The image to resize.
            size (`int` or `Tuple[int, int]`):
                The size to which crop the image.

        Returns:
            new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
            height, width).
        """
        self._ensure_format_supported(image)

        if not isinstance(size, tuple):
            size = (size, size)

        # PIL Image.size is (width, height) but NumPy arrays and torch Tensors have (height, width)
        if is_torch_tensor(image) or isinstance(image, np.ndarray):
            if image.ndim == 2:
                image = self.expand_dims(image)
            image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
        else:
            image_shape = (image.size[1], image.size[0])

        top = (image_shape[0] - size[0]) // 2
        bottom = top + size[0]  # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
        left = (image_shape[1] - size[1]) // 2
        right = left + size[1]  # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.

        # For PIL Images we have a method to crop directly.
        if isinstance(image, PIL.Image.Image):
            return image.crop((left, top, right, bottom))

        # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
        channel_first = True if image.shape[0] in [1, 3] else False

        # Transpose (height, width, n_channels) format images
        if not channel_first:
            if isinstance(image, np.ndarray):
                image = image.transpose(2, 0, 1)
            if is_torch_tensor(image):
                image = image.permute(2, 0, 1)

        # Check if cropped area is within image boundaries
        if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
            return image[..., top:bottom, left:right]

        # Otherwise, we may need to pad if the image is too small.
        new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
        if isinstance(image, np.ndarray):
            new_image = np.zeros_like(image, shape=new_shape)
        elif is_torch_tensor(image):
            new_image = image.new_zeros(new_shape)

        top_pad = (new_shape[-2] - image_shape[0]) // 2
        bottom_pad = top_pad + image_shape[0]
        left_pad = (new_shape[-1] - image_shape[1]) // 2
        right_pad = left_pad + image_shape[1]
        new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image

        top += top_pad
        bottom += top_pad
        left += left_pad
        right += left_pad

        new_image = new_image[
            ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
        ]

        return new_image

    def flip_channel_order(self, image):
        """
        Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
        `image` to a NumPy array if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
                be first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image)

        return image[::-1, :, :]

    def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
        """
        Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
        counter clockwise around its centre.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
                rotating.

        Returns:
            image: A rotated `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PIL.Image.NEAREST

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        return image.rotate(
            angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
        )


def validate_annotations(
    annotation_format: AnnotationFormat,
    supported_annotation_formats: Tuple[AnnotationFormat, ...],
    annotations: List[Dict],
) -> None:
    if annotation_format not in supported_annotation_formats:
        raise ValueError(
            f"Unsupported annotation format: {annotation_format} must be one of {supported_annotation_formats}"
        )

    if annotation_format is AnnotationFormat.COCO_DETECTION:
        if not valid_coco_detection_annotations(annotations):
            raise ValueError(
                "Invalid COCO detection annotations. Annotations must be a dict (single image) or list of dicts"
                " (batch of images) with the following keys: `image_id` and `annotations`, with the latter being a"
                " list of annotations in the COCO format."
            )

    if annotation_format is AnnotationFormat.COCO_PANOPTIC:
        if not valid_coco_panoptic_annotations(annotations):
            raise ValueError(
                "Invalid COCO panoptic annotations. Annotations must be a dict (single image) or list of dicts"
                " (batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with the"
                " latter being a list of annotations in the COCO format."
            )


def validate_kwargs(valid_processor_keys: List[str], captured_kwargs: List[str]):
    # Warn about any keyword arguments that the processor does not recognize.
    unused_keys = set(captured_kwargs).difference(set(valid_processor_keys))
    if unused_keys:
        unused_key_str = ", ".join(unused_keys)
        logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.")
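

# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# A minimal, hedged example of the helpers defined above, assuming NumPy and Pillow are installed.
# It uses a synthetic channels-last array instead of a real image file, so nothing is read from disk
# or downloaded.
if __name__ == "__main__":
    # A fake 64x64 RGB image in channels-last layout with uint8 pixel values.
    fake_image = np.zeros((64, 64, 3), dtype=np.uint8)

    print(infer_channel_dimension_format(fake_image))         # -> ChannelDimension.LAST
    print(get_image_size(fake_image, ChannelDimension.LAST))  # -> (64, 64)
    print(is_scaled_image(fake_image))                        # -> False: uint8 values are not in [0, 1]
    print(len(make_list_of_images(fake_image)))               # -> 1: a single image becomes a list of length 1

    # The legacy mixin converts between PIL / NumPy / torch representations.
    mixin = ImageFeatureExtractionMixin()
    as_float = mixin.to_numpy_array(fake_image, rescale=True, channel_first=True)
    print(as_float.dtype, as_float.shape)                     # -> float32 (3, 64, 64)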