
    gF                     F   d dl Z d dlmZ d dlmZmZmZmZmZ d dl	Z
ddlmZmZmZmZmZ ddlmZmZmZmZmZ ddlmZmZmZmZmZmZmZ  e            r
d dlZddlm Z   e            rd dl!Z! e            rd dl"Z# e            rd dl$m	Z%  e            rd d	l&m'Z( n e            rd d	l)m'Z( 	 dGd
e
j*        deee+f         deeee+f                  de
j*        fdZ,de
j-        dfd
e
j*        de.dee         de
j/        deee+ef                  de
j*        fdZ0d Z1	 	 	 dHd
ee
j*        ddddf         dee2         dee+         deee+ef                  ddf
dZ3	 	 	 dIde
j*        dee4ee4e4f         ee4         ee4         f         de2d ee4         deee+ef                  de5fd!Z6	 	 	 	 	 dJd
e
j*        dee4e4f         d"d#d$ee4         dee         d%e2deee+ef                  de
j*        fd&Z7	 	 dKd
e
j*        d'ee.ee.         f         d(ee.ee.         f         dee         deee+ef                  de
j*        fd)Z8	 	 	 dHd
e
j*        dee4e4f         deee+ef                  deee+ef                  d%ee2         de
j*        fd*Z9dLd,Z:d+e
j*        de
j*        fd-Z;dMd.Z<d+edefd/Z=dNd1Z>d0e
j*        de
j*        fd2Z?dOd3Z@d0edefd4ZAd5 ZBd6 ZC G d7 d8e          ZDeDjE        d9ddfd
e
j*        d:ee4ee4e4f         eee4e4f                  f         d;eDd<ee.ee.         f         deee+ef                  deee+ef                  de
j*        fd=ZFd
edefd>ZG	 	 dKd
e
j*        dee         deee+ef                  de
j*        fd?ZHd@ ZI G dA dB          ZJ G dC dD          ZK G dE dF          ZLdS )P    N)ceil)IterableListOptionalTupleUnion   )ChannelDimension
ImageInputget_channel_dimension_axisget_image_sizeinfer_channel_dimension_format)ExplicitEnum
TensorTypeis_jax_tensoris_tf_tensoris_torch_tensor)is_flax_availableis_tf_availableis_torch_availableis_torchvision_availableis_torchvision_v2_availableis_vision_availablerequires_backends)PILImageResampling)
functionalimagechannel_diminput_channel_dimreturnc                    t          | t          j                  st          dt	          |                      |t          |           }t          |          }||k    r| S |t          j        k    r|                     d          } nH|t          j	        k    r|                     d          } n"t          d                    |                    | S )a)  
    Converts `image` to the channel dimension format specified by `channel_dim`.

    Args:
        image (`numpy.ndarray`):
            The image to have its channel dimension set.
        channel_dim (`ChannelDimension`):
            The channel dimension format to use.
        input_channel_dim (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred from the input image.

    Returns:
        `np.ndarray`: The image with the channel dimension set to `channel_dim`.
    ,Input image must be of type np.ndarray, got N)   r   r	   )r	   r#   r   z(Unsupported channel dimension format: {})
isinstancenpndarray	TypeErrortyper   r
   FIRST	transposeLAST
ValueErrorformat)r   r   r   target_channel_dims       Y/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/image_transforms.pyto_channel_dimension_formatr0   =   s    & eRZ(( VTtE{{TTUUU :5AA)+66...-333	**	/4	4	4	**CJJ;WWXXXL    scaledata_formatdtypeinput_data_formatc                    t          | t          j                  st          dt	          |                      |                     t          j                  |z  }|t          |||          }|                    |          }|S )a  
    Rescales `image` by `scale`.

    Args:
        image (`np.ndarray`):
            The image to rescale.
        scale (`float`):
            The scale to use for rescaling the image.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        dtype (`np.dtype`, *optional*, defaults to `np.float32`):
            The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature
            extractors.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred from the input image.

    Returns:
        `np.ndarray`: The rescaled image.
    r"   )r$   r%   r&   r'   r(   astypefloat64r0   )r   r2   r3   r4   r5   rescaled_images         r/   rescaler:   d   s    4 eRZ(( VTtE{{TTUUU\\"*--5N4^[Rcdd#**511Nr1   c                 F   | j         t          j        k    rd}nt          j        | |                     t
                              rmt          j        d| k              rt          j        | dk              rd}nt          d|                                  d| 	                                 d          t          j        d| k              rt          j        | dk              rd}n:t          d	|                                  d| 	                                 d          |S )
z
    Detects whether or not the image needs to be rescaled before being converted to a PIL image.

    The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be
    rescaled.
    Fr      zZThe image to be converted to a PIL image contains values outside the range [0, 255], got [z, z%] which cannot be converted to uint8.r	   TzXThe image to be converted to a PIL image contains values outside the range [0, 1], got [)
r4   r%   uint8allcloser7   intallr,   minmax)r   
do_rescales     r/   _rescale_for_pil_conversionrD      s@    {bh

	UELL--	.	. 
6!u* 	"&#"6"6 	JJZ		Z Z',yy{{Z Z Z   
U
		 
uz 2 2 


VIIKKV V#(99;;V V V
 
 	
 r1   zPIL.Image.Imagetorch.Tensor	tf.Tensorzjnp.ndarrayrC   
image_modec                    t          t          dg           t          | t          j        j                  r| S t          |           st          |           r|                                 } nmt          |           rt          j
        |           } nIt          | t          j                  s/t          d                    t          |                               t          | t           j        |          } | j        d         dk    rt          j        | d          n| } |t)          |           n|}|rt+          | d          } |                     t          j                  } t          j                            | |          S )	a  
    Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
    needed.

    Args:
        image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):
            The image to convert to the `PIL.Image` format.
        do_rescale (`bool`, *optional*):
            Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default
            to `True` if the image type is a floating type and casting to `int` would result in a loss of precision,
            and `False` otherwise.
        image_mode (`str`, *optional*):
            The mode to use for the PIL image. If unset, will use the default mode for the input image type.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `PIL.Image.Image`: The converted image.
    visionz"Input image type not supported: {}r	   axisNr<   mode)r   to_pil_imager$   PILImager   r   numpyr   r%   arrayr&   r,   r-   r(   r0   r
   r+   shapesqueezerD   r:   r7   r=   	fromarray)r   rC   rG   r5   s       r/   rO   rO      s^   2 lXJ///%))  u Se!4!4 S	u		 Srz** S=DDT%[[QQRRR (/?/DFWXXE +0+b/Q*>*>BJu2&&&&EE 8B7I,U333zJ $s##LL""E9u:666r1   Tinput_imagesizedefault_to_squaremax_sizec                    t          |t          t          f          rMt          |          dk    rt          |          S t          |          dk    r	|d         }nt	          d          |r||fS t          | |          \  }}||k    r||fn||f\  }}|}	|	t          |	|z  |z            }}
|8||	k    rt	          d| d|           ||k    rt          ||
z  |z            |}}
||k    r||
fn|
|fS )a  
    Find the target (height, width) dimension of the output image after resizing given the input image and the desired
    size.

    Args:
        input_image (`np.ndarray`):
            The image to resize.
        size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):
            The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to
            this.

            If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
            `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this
            number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
        default_to_square (`bool`, *optional*, defaults to `True`):
            How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square
            (`size`,`size`). If set to `False`, will replicate
            [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
            with support for resizing only the smallest edge and providing an optional `max_size`.
        max_size (`int`, *optional*):
            The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater
            than `max_size` after being resized according to `size`, then the image is resized again so that the longer
            edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter
            than `size`. Only used if `default_to_square` is `False`.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `tuple`: The target (height, width) dimension of the output image after resizing.
    r#   r	   r   z7size must have 1 or 2 elements if it is a list or tupleNzmax_size = zN must be strictly greater than the requested size for the smaller edge size = )r$   tuplelistlenr,   r   r?   )rW   rX   rY   rZ   r5   heightwidthshortlongrequested_new_short	new_shortnew_longs               r/   get_resize_output_image_sizerf      sM   J $&& Xt99>>;;YY!^^7DDVWWW d|";0ABBMFE%*f__5&//65/KE4-s3F3MPU3U/V/VxI***;h ; ;48; ;   h"%h&:X&E"F"FxI$)VOOHi  )X9NNr1   resampler   reducing_gapreturn_numpyc                 X   t          t          dg           ||nt          j        }t	          |          dk    st          d          |t          |           }||n|}d}t          | t          j	        j	                  s!t          |           }t          | ||          } |\  }}	|                     |	|f||          }
|rgt          j        |
          }
|
j        dk    rt          j        |
d	          n|
}
t!          |
|t"          j        
          }
|rt'          |
d          n|
}
|
S )a  
    Resizes `image` to `(height, width)` specified by `size` using the PIL library.

    Args:
        image (`np.ndarray`):
            The image to resize.
        size (`Tuple[int, int]`):
            The size to use for resizing the image.
        resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
            The filter to user for resampling.
        reducing_gap (`int`, *optional*):
            Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to
            the fair resampling. See corresponding Pillow documentation for more details.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If unset, will use the inferred format from the input.
        return_numpy (`bool`, *optional*, defaults to `True`):
            Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is
            returned.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.

    Returns:
        `np.ndarray`: The resized image.
    rI   Nr#   zsize must have 2 elementsF)rC   r5   )rg   rh   rJ   rK   r   gp?)r   resizer   BILINEARr^   r,   r   r$   rP   rQ   rD   rO   r%   rS   ndimexpand_dimsr0   r
   r+   r:   )r   rX   rg   rh   r3   ri   r5   rC   r_   r`   resized_images              r/   rl   rl     sW   B fxj)))#/xx5G5PHt99>>4555  :5AA'2':##K JeSY_-- `077
UzM^___MFELL%8R^L__M Y// CPBTXYBYBY}2>>>>_l3;:J:O
 
 

 <FXw777=r1   meanstdc                 l   t          | t          j                  st          d          |t	          |           }t          | |          }| j        |         }t          j        | j        t          j	                  s| 
                    t          j                  } t          |t                    r6t          |          |k    r"t          d| dt          |                     n|g|z  }t          j        || j                  }t          |t                    r6t          |          |k    r"t          d| dt          |                     n|g|z  }t          j        || j                  }|t          j        k    r	| |z
  |z  } n| j        |z
  |z  j        } |t%          | ||          n| } | S )a  
    Normalizes `image` using the mean and standard deviation specified by `mean` and `std`.

    image = (image - mean) / std

    Args:
        image (`np.ndarray`):
            The image to normalize.
        mean (`float` or `Iterable[float]`):
            The mean to use for normalization.
        std (`float` or `Iterable[float]`):
            The standard deviation to use for normalization.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If unset, will use the inferred format from the input.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the input image. If unset, will use the inferred format from the input.
    zimage must be a numpy arrayN)r5   zmean must have z$ elements if it is an iterable, got r4   zstd must have )r$   r%   r&   r,   r   r   rT   
issubdtyper4   floatingr7   float32r   r^   rS   r
   r+   Tr0   )r   rq   rr   r3   r5   channel_axisnum_channelss          r/   	normalizer{   f  s   0 eRZ(( 86777 :5AA-eGXYYYL;|,L =bk22 )RZ(($!! %t99$$l|lladeiajajllmmm % v$8D,,,D#x   #s88|##jljj`cdg`h`hjjkkk $ el"
(3ek
*
*
*C,111$'D.C'*R]Ri'{<MNNNotELr1   c                    t          t          dg           |t          j        dt                     |dn|}t          | t          j                  st          dt          |                      t          |t                    rt          |          dk    rt          d          |t          |           }||n|}t          | t          j        |          } t#          | t          j                  \  }}|\  }}	t%          |          t%          |	          }	}||z
  dz  }
|
|z   }||	z
  dz  }||	z   }|
dk    r>||k    r8|dk    r2||k    r,| d	|
|||f         } t          | |t          j                  } | S t'          ||          }t'          |	|          }| j        dd
         ||fz   }t          j        | |          }t-          ||z
  dz            }||z   }t-          ||z
  dz            }||z   }| |d	||||f<   |
|z  }
||z  }||z  }||z  }|d	t'          d|
          t/          ||          t'          d|          t/          ||          f         }t          ||t          j                  }|st1          |          }|S )a  
    Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to
    the size given, it will be padded (so the returned result will always be of size `size`).

    Args:
        image (`np.ndarray`):
            The image to crop.
        size (`Tuple[int, int]`):
            The target size for the cropped image.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use the inferred format of the input image.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use the inferred format of the input image.
        return_numpy (`bool`, *optional*):
            Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the
            previous ImageFeatureExtractionMixin method.
                - Unset: will return the same type as the input image.
                - `True`: will return a numpy array.
                - `False`: will return a `PIL.Image.Image` object.
    Returns:
        `np.ndarray`: The cropped image.
    rI   Nz8return_numpy is deprecated and will be removed in v.4.33Tr"   r#   zOsize must have 2 elements representing the height and width of the output imager   .)rT   )r   center_cropwarningswarnFutureWarningr$   r%   r&   r'   r(   r   r^   r,   r   r0   r
   r)   r   r?   rB   rT   
zeros_liker   rA   rO   )r   rX   r3   r5   ri   output_data_formatorig_height
orig_widthcrop_height
crop_widthtopbottomleftright
new_height	new_width	new_shape	new_imagetop_pad
bottom_padleft_pad	right_pads                         r/   r~   r~     s   F kH:...PR_```'/44\LeRZ(( VTtE{{TTUUUdH%% lTajkkk :5AA(3(?EV (/?/EGXYYE,U4D4JKKK"K!+..JK $
*C;F#)D:E axxFk))daiiEZ<O<Oc3v:tEz12+E3EGWG]^^ [+..JJ
++ICRC J	#::Ie9555I J,122G;&JY+q011H:%I=BIc7:%x	'99:7NC
gFHD	XE#s1c{{SV-D-DDc!TllUXYbdiUjUjFjjkI+I7IK[KabbI , ++	r1   bboxes_centerc                     |                      d          \  }}}}t          j        |d|z  z
  |d|z  z
  |d|z  z   |d|z  z   gd          }|S )NrJ         ?dimunbindtorchstack)r   center_xcenter_yr`   r_   bbox_cornerss         r/   _center_to_corners_format_torchr     st    (5(<(<R(@(@%Hhv;
S5[
 HsV|$;x#PU+?UYadgjpdpYpr  L
 r1   c                     | j         \  }}}}t          j        |d|z  z
  |d|z  z
  |d|z  z   |d|z  z   gd          }|S )Nr   rJ   rK   rx   r%   r   r   r   r   r`   r_   bboxes_cornerss         r/   _center_to_corners_format_numpyr     sg    (5%HhvX	C%K	C&L!8(S5[:PRZ]`ci]iRij  N
 r1   c                     t          j        | d          \  }}}}t          j        |d|z  z
  |d|z  z
  |d|z  z   |d|z  z   gd          }|S )NrJ   rK   r   tfunstackr   r   s         r/   _center_to_corners_format_tfr     st    (*
=r(J(J(J%HhvX	C%K	C&L!8(S5[:PRZ]`ci]iRij  N
 r1   c                 
   t          |           rt          |           S t          | t          j                  rt          |           S t          |           rt          |           S t          dt          |                      )a|  
    Converts bounding boxes from center format to corners format.

    center format: contains the coordinate for the center of the box and its width, height dimensions
        (center_x, center_y, width, height)
    corners format: contains the coodinates for the top-left and bottom-right corners of the box
        (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    Unsupported input type )
r   r   r$   r%   r&   r   r   r   r,   r(   )r   s    r/   center_to_corners_formatr   %  s     }%% ;.}===	M2:	.	. ;.}===	m	$	$ ;+M:::
DtM/B/BDD
E
EEr1   r   c                     |                      d          \  }}}}||z   dz  ||z   dz  ||z
  ||z
  g}t          j        |d          S )NrJ   r#   r   r   )r   
top_left_x
top_left_ybottom_right_xbottom_right_ybs         r/   _corners_to_center_format_torchr   :  sg    =K=R=RSU=V=V:J
NN	n	$)	n	$)	*	$	*	$		A ;qb!!!!r1   c                 v    | j         \  }}}}t          j        ||z   dz  ||z   dz  ||z
  ||z
  gd          }|S )Nr#   rJ   rK   r   r   r   r   r   r   r   s         r/   _corners_to_center_format_numpyr   E  se    =K=M:J
NNH.(A-.(A-j(j(		
   M r1   c                     t          j        | d          \  }}}}t          j        ||z   dz  ||z   dz  ||z
  ||z
  gd          }|S )NrJ   rK   r#   r   r   s         r/   _corners_to_center_format_tfr   S  sr    =?Z]_=`=`=`:J
NNH.(A-.(A-j(j(		
   M r1   c                 
   t          |           rt          |           S t          | t          j                  rt          |           S t          |           rt          |           S t          dt          |                      )a  
    Converts bounding boxes from corners format to center format.

    corners format: contains the coordinates for the top-left and bottom-right corners of the box
        (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    center format: contains the coordinate for the center of the box and its the width, height dimensions
        (center_x, center_y, width, height)
    r   )
r   r   r$   r%   r&   r   r   r   r,   r(   )r   s    r/   corners_to_center_formatr   a  s     ~&& <.~>>>	NBJ	/	/ <.~>>>	n	%	% <+N;;;
EtN/C/CEE
F
FFr1   c                    t          | t          j                  rt          | j                  dk    ri| j        t          j        k    r|                     t          j                  } | dddddf         d| dddddf         z  z   d| dddddf         z  z   S t          | d         d| d         z  z   d| d         z  z             S )z*
    Converts RGB color to unique ID.
       Nr      r	   i   r#   )
r$   r%   r&   r^   rT   r4   r=   r7   int32r?   )colors    r/   	rgb_to_idr   x  s     %$$ RU[)9)9Q)>)>;"(""LL**EQQQ1W~eAAAqqq!Gn 44y5AAAq>7QQQuQx#a.(9uQx+??@@@r1   c                    t          | t          j                  r||                                 }t	          t          | j                  dgz             }t          j        |t          j                  }t          d          D ]}|dz  |d|f<   |dz  }|S g }t          d          D ]}|
                    | dz             | dz  }  |S )z*
    Converts unique ID to RGB color.
    r   rt   r   .)r$   r%   r&   copyr\   r]   rT   zerosr=   rangeappend)id_mapid_map_copy	rgb_shapergb_mapir   _s          r/   	id_to_rgbr     s     &"*%% kkmm$v|,,s233	(9BH555q 	  	 A)C/GCFOCKKE1XX  Vc\"""3Lr1   c                   "    e Zd ZdZdZdZdZdZdS )PaddingModezP
    Enum class for the different padding modes to use when padding images.
    constantreflect	replicate	symmetricN)__name__
__module____qualname____doc__CONSTANTREFLECT	REPLICATE	SYMMETRIC r1   r/   r   r     s.          HGIIIIr1   r   g        paddingrN   constant_valuesc                     t                      fd} ||          }|t          j        k    r$ ||          }t          j         |d|           n|t          j        k    rt          j         |d           nb|t          j        k    rt          j         |d           n:|t          j        k    rt          j         |d           nt          d	|           |t           |          n   S )
a  
    Pads the `image` with the specified (height, width) `padding` and `mode`.

    Args:
        image (`np.ndarray`):
            The image to pad.
        padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):
            Padding to apply to the edges of the height, width axes. Can be one of three formats:
            - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.
            - `((before, after),)` yields same before and after pad for height and width.
            - `(pad,)` or int is a shortcut for before = after = pad width for all axes.
        mode (`PaddingMode`):
            The padding mode to use. Can be one of:
                - `"constant"`: pads with a constant value.
                - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the
                  vector along each axis.
                - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis.
                - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array.
        constant_values (`float` or `Iterable[float]`, *optional*):
            The value to use for the padding if `mode` is `"constant"`.
        data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use same as the input image.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format for the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use the inferred format of the input image.

    Returns:
        `np.ndarray`: The padded image.

    Nc                    t          | t          t          f          r	| | f| | ff} nt          | t                    r4t	          |           dk    r!| d         | d         f| d         | d         ff} nt          | t                    r3t	          |           dk    r t          | d         t                    r| | f} nXt          | t                    r1t	          |           dk    rt          | d         t                    r| } nt          d|            t          j        k    rdg| R ng | dR } j        dk    rdg| R n| } | S )za
        Convert values to be in the format expected by np.pad based on the data format.
        r	   r   r#   zUnsupported format: )r   r      )	r$   r?   floatr\   r^   r,   r
   r)   rn   )valuesr   r5   s    r/   _expand_for_data_formatz$pad.<locals>._expand_for_data_format  sh    fsEl++ 		>v&(89FF&& 	>3v;;!+;+;ay&),vay&).DEFF&& 	>3v;;!+;+;
6RS9VY@Z@Z+;f%FF&& 	>3v;;!+;+;
6RS9V[@\@\+;FF<F<<=== '8;K;Q&Q&Q&"6"""WhY_WhagWhWh "'q!ffr1   r   )rN   r   r   rM   edger   zInvalid padding mode: )
r   r   r   r%   padr   r   r   r,   r0   )r   r   rN   r   r3   r5   r   s   `    ` r/   r   r     s/   V  :5AA     , &%g..G{###11/BBugJXXX	$	$	$ugI666	&	&	&ugF333	&	&	&ugK8888$88999R]Ri'{<MNNNotELr1   c                     t          t          dg           t          | t          j        j                  s| S | j        dk    r| S |                     d          } | S )z
    Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
    as is.
    Args:
        image (Image):
            The image to convert.
    rI   RGB)r   convert_to_rgbr$   rP   rQ   rN   convert)r   s    r/   r   r     sY     nxj111eSY_-- zUMM%  ELr1   c                     |t          |           n|}|t          j        k    r| ddddf         } n0|t          j        k    r| ddddf         } nt	          d|           |t          | ||          } | S )a  
    Flips the channel order of the image.

    If the image is in RGB format, it will be converted to BGR and vice versa.

    Args:
        image (`np.ndarray`):
            The image to flip.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format for the output image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use same as the input image.
        input_data_format (`ChannelDimension`, *optional*):
            The channel dimension format for the input image. Can be one of:
                - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            If unset, will use the inferred format of the input image.
    N.rJ   zUnsupported channel dimension: rk   )r   r
   r+   r)   r,   r0   )r   r3   r5   s      r/   flip_channel_orderr     s    0 BSAZ6u===`q,111c44R4i 	.4	4	4dddCi N;LNNOOO+E;RcdddLr1   c                 V    |                                  r| S |                                 S N)is_floating_pointr   )xs    r/   _cast_tensor_to_floatr   2  s)     7799r1   c                   ,    e Zd ZdZddedefdZdd	Zd
S )FusedRescaleNormalizez<
    Rescale and normalize the input image in one step.
          ?Frescale_factorinplacec                     t          j        |          d|z  z  | _        t          j        |          d|z  z  | _        || _        d S )Nr   )r   tensorrq   rr   r   )selfrq   rr   r   r   s        r/   __init__zFusedRescaleNormalize.__init__=  sC    L&&#*>?	<$$n(<=r1   r   rE   c                 n    t          |          }t          j        || j        | j        | j                  S )N)r   )r   Fr{   rq   rr   r   r   r   s     r/   __call__zFusedRescaleNormalize.__call__B  s.    %e,,{5$)TXt|LLLLr1   N)r   Fr   rE   )r   r   r   r   r   boolr   r   r   r1   r/   r   r   8  s_          %     
M M M M M Mr1   r   c                   (    e Zd ZdZd	defdZd
dZdS )RescalezM
    Rescale the input image by rescale factor: image *= rescale_factor.
    r   r   c                     || _         d S r   r   )r   r   s     r/   r   zRescale.__init__L  s    ,r1   r   rE   c                     || j         z  }|S r   r  r   s     r/   r   zRescale.__call__O  s    ++r1   N)r   r   )r   r   r   r   r   r   r   r   r1   r/   r  r  G  sR         - -u - - - -     r1   r  c                   (    e Zd ZdZdej        fdZdS )NumpyToTensorz4
    Convert a numpy array to a PyTorch tensor.
    r   c                 x    t          j        |                    ddd                                                    S )Nr#   r   r	   )r   
from_numpyr*   
contiguousr   s     r/   r   zNumpyToTensor.__call__Y  s1     1a 8 899DDFFFr1   N)r   r   r   r   r%   r&   r   r   r1   r/   r  r  T  sB         Gbj G G G G G Gr1   r  r   )NNN)TNN)NNNTN)NN)r   rE   r    rE   )r   rF   r    rF   )r   rE   r    rE   )r   rF   r    rF   )Mr   mathr   typingr   r   r   r   r   rR   r%   image_utilsr
   r   r   r   r   utilsr   r   r   r   r   utils.import_utilsr   r   r   r   r   r   r   rP   r   r   
tensorflowr   	jax.numpyjnptorchvision.transforms.v2r   r   torchvision.transformsr&   strr0   rw   r   r4   r:   rD   r   rO   r?   r\   rf   rl   r{   r~   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r  r  r   r1   r/   <module>r     s	           9 9 9 9 9 9 9 9 9 9 9 9 9 9                  Z Y Y Y Y Y Y Y Y Y Y Y Y Y                   0JJJ////// LLL?     79999999 7666666 AE$ $:$',-$  &6&; <=$ Z	$ $ $ $T /3j@D# #:## *+# 8	#
  c+;&; <=# Z# # # #L  : "& $@D	37 37.]Z[3737 37  c+;&; <=	37
 37 37 37 37t #"@D@O @O@O
U38_d3is;
<@O @O sm	@O
  c+;&; <=@O @O @O @O @OL &*"&.2@DD D:D
S/D #D 3-	D
 *+D D  c+;&; <=D ZD D D DV /3@D: :::
x&
': 
uhuo%	&: *+	:
  c+;&; <=: Z: : : :@ ;?@D#'` `:`
S/` %%5 567`  c+;&; <=	`
 4.` Z` ` ` `F   2: "*       FJ F: F F F F*" " " "BJ 2:       GZ GJ G G G G.A A A  &    ,    $,58:>@DS S:S3c3h%S/)BBCS S 5(5/12	S
 %%5 567S  c+;&; <=S ZS S S Sn*     , /3@D# #:#*+#  c+;&; <=# Z	# # # #L  M M M M M M M M
 
 
 
 
 
 
 
G G G G G G G G G Gr1   