import warnings

import torch.nn.functional as F
from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase

__all__ = [
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
    "LazyInstanceNorm1d",
    "LazyInstanceNorm2d",
    "LazyInstanceNorm3d",
]


class _InstanceNorm(_NormBase):
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = False,
        track_running_stats: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
        )

    def _check_input_dim(self, input):
        raise NotImplementedError

    def _get_no_batch_dim(self):
        raise NotImplementedError

    def _handle_no_batch_input(self, input):
        # Temporarily add a batch dimension so the functional form sees (N, C, ...).
        return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)

    def _apply_instance_norm(self, input):
        return F.instance_norm(
            input,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            self.momentum if self.momentum is not None else 0.0,
            self.eps,
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)
        # At version 1: running_mean and running_var were removed when
        # track_running_stats=False (the default).
        if version is None and not self.track_running_stats:
            running_stats_keys = []
            for name in ("running_mean", "running_var"):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    "Unexpected running stats buffer(s) {names} for {klass} "
                    "with track_running_stats=False. If state_dict is a "
                    "checkpoint saved before 0.4.0, this may be expected "
                    "because {klass} does not track running stats by default "
                    "since 0.4.0. Please remove these keys from state_dict. If "
                    "the running stats are actually needed, instead set "
                    "track_running_stats=True in {klass} to enable them. See "
                    "the documentation of {klass} for details.".format(
                        names=" and ".join(f'"{k}"' for k in running_stats_keys),
                        klass=self.__class__.__name__,
                    )
                )
                for key in running_stats_keys:
                    state_dict.pop(key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        feature_dim = input.dim() - self._get_no_batch_dim()
        if input.size(feature_dim) != self.num_features:
            if self.affine:
                raise ValueError(
                    f"expected input's size at dim={feature_dim} to match "
                    f"num_features ({self.num_features}), but got: "
                    f"{input.size(feature_dim)}."
                )
            else:
                warnings.warn(
                    f"input's size at dim={feature_dim} does not match "
                    "num_features. You can silence this warning by not passing "
                    "in num_features, which is not used because affine=False"
                )

        if input.dim() == self._get_no_batch_dim():
            return self._handle_no_batch_input(input)

        return self._apply_instance_norm(input)


class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 2D (unbatched) or 3D (batched) input as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.
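
        For example, with ``momentum=0.1``, a running mean of ``0.0``, and an
        observed per-instance mean of ``2.0``, the updated running mean is
        :math:`(1 - 0.1) \times 0.0 + 0.1 \times 2.0 = 0.2`.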

    .. note::
        :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm1d` is applied
        on each channel of channeled data like multidimensional time series, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm1d` usually doesn't apply affine
        transform.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
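        >>> # Unbatched (C, L) input is also supported; a batch dimension is
        >>> # temporarily added and removed internally (_handle_no_batch_input)
        >>> unbatched_output = m(torch.randn(100, 40))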
    """

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`(C, L)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
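
    Examples::

        >>> # A minimal sketch: num_features (here 100) is inferred from
        >>> # input.size(1) on the first forward call
        >>> m = nn.LazyInstanceNorm1d(affine=True)
        >>> output = m(torch.randn(20, 100, 40))
        >>> m.weight.shape
        torch.Size([100])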
    """

    cls_to_become = InstanceNorm1d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 4D input (a mini-batch of 2D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm2d` is applied
        on each channel of channeled data like RGB images, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm2d` usually doesn't apply affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm2d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm2d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
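        >>> # With track_running_stats=True, running estimates collected
        >>> # during training are used for normalization in eval mode
        >>> m = nn.InstanceNorm2d(100, track_running_stats=True)
        >>> output = m(input)  # training: normalizes with batch statistics
        >>> output = m.eval()(input)  # eval: uses running_mean / running_var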
    """

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
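
    Examples::

        >>> # A minimal sketch: after the first forward call the lazy module
        >>> # replaces itself with a regular InstanceNorm2d (``cls_to_become``)
        >>> m = nn.LazyInstanceNorm2d()
        >>> output = m(torch.randn(20, 100, 35, 45))
        >>> type(m).__name__
        'InstanceNorm2d'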
    """

    cls_to_become = InstanceNorm2d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size C (where C is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm3d` is applied
        on each channel of channeled data like 3D models with RGB color, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies elementwise affine
        transform, while :class:`InstanceNorm3d` usually doesn't apply affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm3d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm3d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
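        >>> # Statistics are computed per sample and per channel: each
        >>> # output[n, c] volume has roughly zero mean and unit variance
        >>> per_channel_mean = output.mean(dim=(2, 3, 4))  # values close to 0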
    """

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")


class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
    """

    cls_to_become = InstanceNorm3d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")