from functools import partial
from typing import Any, List, Optional, Type, Union

import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import (
    BasicBlock,
    Bottleneck,
    ResNet,
    ResNet18_Weights,
    ResNet50_Weights,
    ResNeXt101_32X8D_Weights,
    ResNeXt101_64X4D_Weights,
)

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableResNet",
    "ResNet18_QuantizedWeights",
    "ResNet50_QuantizedWeights",
    "ResNeXt101_32X8D_QuantizedWeights",
    "ResNeXt101_64X4D_QuantizedWeights",
    "resnet18",
    "resnet50",
    "resnext101_32x8d",
    "resnext101_64x4d",
]


class QuantizableBasicBlock(BasicBlock):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.add_relu = torch.nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.add_relu.add_relu(out, identity)

        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, [["conv1", "bn1", "relu"], ["conv2", "bn2"]], is_qat, inplace=True)
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)


class QuantizableBottleneck(Bottleneck):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.skip_add_relu = nn.quantized.FloatFunctional()
        self.relu1 = nn.ReLU(inplace=False)
        self.relu2 = nn.ReLU(inplace=False)

    def forward(self, x: Tensor) -> Tensor:
        identity = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu2(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)
        out = self.skip_add_relu.add_relu(out, identity)

        return out

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(
            self, [["conv1", "bn1", "relu1"], ["conv2", "bn2", "relu2"], ["conv3", "bn3"]], is_qat, inplace=True
        )
        if self.downsample:
            _fuse_modules(self.downsample, ["0", "1"], is_qat, inplace=True)


class QuantizableResNet(ResNet):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

        self.quant = torch.ao.quantization.QuantStub()
        self.dequant = torch.ao.quantization.DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x
    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        """Fuse conv/bn/relu modules in resnet models.

        Fuses conv+bn+relu, conv+relu, and conv+bn modules to prepare for quantization.
        The model is modified in place. Note that this operation does not change numerics,
        and the model after modification remains in floating point.
        """

        _fuse_modules(self, ["conv1", "bn1", "relu"], is_qat, inplace=True)
        for m in self.modules():
            if type(m) is QuantizableBottleneck or type(m) is QuantizableBasicBlock:
                m.fuse_model(is_qat)


def _resnet(
    block: Type[Union[QuantizableBasicBlock, QuantizableBottleneck]],
    layers: List[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    quantize: bool,
    **kwargs: Any,
) -> QuantizableResNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableResNet(block, layers, **kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "backend": "fbgemm",
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
    "_docs": """
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    """,
}


class ResNet18_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 11689512,
            "unquantized": ResNet18_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 69.494, "acc@5": 88.882}},
            "_ops": 1.814,
            "_file_size": 11.238,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


class ResNet50_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 75.920, "acc@5": 92.814}},
            "_ops": 4.089,
            "_file_size": 24.759,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 25557032,
            "unquantized": ResNet50_Weights.IMAGENET1K_V2,
            "_metrics": {"ImageNet-1K": {"acc@1": 80.282, "acc@5": 94.976}},
            "_ops": 4.089,
            "_file_size": 24.953,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2


class ResNeXt101_32X8D_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth",
        transforms=partial(ImageClassification, crop_size=224),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 78.986, "acc@5": 94.480}},
            "_ops": 16.414,
            "_file_size": 86.034,
        },
    )
    IMAGENET1K_FBGEMM_V2 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 88791336,
            "unquantized": ResNeXt101_32X8D_Weights.IMAGENET1K_V2,
            "_metrics": {"ImageNet-1K": {"acc@1": 82.574, "acc@5": 96.132}},
            "_ops": 16.414,
            "_file_size": 86.645,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V2


class ResNeXt101_64X4D_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=232),
        meta={
            **_COMMON_META,
            "num_params": 83455272,
            "recipe": "https://github.com/pytorch/vision/pull/5935",
            "unquantized": ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
            "_metrics": {"ImageNet-1K": {"acc@1": 82.898, "acc@5": 96.326}},
            "_ops": 15.46,
            "_file_size": 81.556,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1
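# The enums above bundle each checkpoint URL with its preprocessing preset and its
# evaluation metadata. A minimal sketch of how callers typically inspect them
# (illustrative only; every key used below is defined in the ``meta`` dicts above):
#
#     >>> weights = ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
#     >>> weights.meta["backend"]       # quantization backend the checkpoint expects ("fbgemm")
#     >>> weights.meta["unquantized"]   # the matching floating-point weights entry
#     >>> preprocess = weights.transforms()   # ImageClassification preset declared above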
@register_model(name="quantized_resnet18")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNet18_Weights.IMAGENET1K_V1,
    )
)
def resnet18(
    *,
    weights: Optional[Union[ResNet18_QuantizedWeights, ResNet18_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-18 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
        :noindex:
    """
    weights = (ResNet18_QuantizedWeights if quantize else ResNet18_Weights).verify(weights)

    return _resnet(QuantizableBasicBlock, [2, 2, 2, 2], weights, progress, quantize, **kwargs)
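# A minimal inference sketch for the builder above (assumes the FBGEMM checkpoint can be
# downloaded; quantized models run on CPU only, as the docstring notes, and ``img`` is a
# placeholder for a PIL image or uint8 tensor):
#
#     >>> from torchvision.models.quantization import resnet18, ResNet18_QuantizedWeights
#     >>> weights = ResNet18_QuantizedWeights.IMAGENET1K_FBGEMM_V1
#     >>> model = resnet18(weights=weights, quantize=True).eval()
#     >>> batch = weights.transforms()(img).unsqueeze(0)
#     >>> prediction = model(batch).softmax(dim=1)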
@register_model(name="quantized_resnet50")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ResNet50_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNet50_Weights.IMAGENET1K_V1,
    )
)
def resnet50(
    *,
    weights: Optional[Union[ResNet50_QuantizedWeights, ResNet50_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNet-50 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
        :noindex:
    """
    weights = (ResNet50_QuantizedWeights if quantize else ResNet50_Weights).verify(weights)

    return _resnet(QuantizableBottleneck, [3, 4, 6, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_32x8d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ResNeXt101_32X8D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNeXt101_32X8D_Weights.IMAGENET1K_V1,
    )
)
def resnext101_32x8d(
    *,
    weights: Optional[Union[ResNeXt101_32X8D_QuantizedWeights, ResNeXt101_32X8D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_32X8D_QuantizedWeights if quantize else ResNeXt101_32X8D_Weights).verify(weights)

    _ovewrite_named_param(kwargs, "groups", 32)
    _ovewrite_named_param(kwargs, "width_per_group", 8)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)
@register_model(name="quantized_resnext101_64x4d")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: ResNeXt101_64X4D_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else ResNeXt101_64X4D_Weights.IMAGENET1K_V1,
    )
)
def resnext101_64x4d(
    *,
    weights: Optional[Union[ResNeXt101_64X4D_QuantizedWeights, ResNeXt101_64X4D_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableResNet:
    """ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
        :noindex:
    """
    weights = (ResNeXt101_64X4D_QuantizedWeights if quantize else ResNeXt101_64X4D_Weights).verify(weights)

    _ovewrite_named_param(kwargs, "groups", 64)
    _ovewrite_named_param(kwargs, "width_per_group", 4)
    return _resnet(QuantizableBottleneck, [3, 4, 23, 3], weights, progress, quantize, **kwargs)