import torch.nn.functional as F
from torch import Tensor

from .module import Module

__all__ = ["PixelShuffle", "PixelUnshuffle"]


class PixelShuffle(Module):
    r"""Rearrange elements in a tensor according to an upscaling factor.

    Rearranges elements in a tensor of shape :math:`(*, C \times r^2, H, W)`
    to a tensor of shape :math:`(*, C, H \times r, W \times r)`, where r is an upscale factor.

    This is useful for implementing efficient sub-pixel convolution
    with a stride of :math:`1/r`.

    See the paper:
    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et al. (2016) for more details.

    Args:
        upscale_factor (int): factor to increase spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

    .. math::
        C_{out} = C_{in} \div \text{upscale\_factor}^2

    .. math::
        H_{out} = H_{in} \times \text{upscale\_factor}

    .. math::
        W_{out} = W_{in} \times \text{upscale\_factor}

    Examples::

        >>> pixel_shuffle = nn.PixelShuffle(3)
        >>> input = torch.randn(1, 9, 4, 4)
        >>> output = pixel_shuffle(input)
        >>> print(output.size())
        torch.Size([1, 1, 12, 12])
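
    A minimal sketch of the sub-pixel convolution pattern described above
    (illustrative only; the channel counts and spatial sizes are arbitrary):
    a convolution first produces :math:`C \times r^2` channels, and
    :class:`PixelShuffle` then rearranges them into the upscaled output::

        >>> upscale = 3
        >>> conv = nn.Conv2d(16, 4 * upscale ** 2, kernel_size=3, padding=1)
        >>> pixel_shuffle = nn.PixelShuffle(upscale)
        >>> x = torch.randn(1, 16, 8, 8)
        >>> print(pixel_shuffle(conv(x)).size())
        torch.Size([1, 4, 24, 24])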

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """

    __constants__ = ["upscale_factor"]
    upscale_factor: int

    def __init__(self, upscale_factor: int) -> None:
        super().__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input: Tensor) -> Tensor:
        return F.pixel_shuffle(input, self.upscale_factor)

    def extra_repr(self) -> str:
        return f"upscale_factor={self.upscale_factor}"


class PixelUnshuffle(Module):
    r"""Reverse the PixelShuffle operation.

    Reverses the :class:`~torch.nn.PixelShuffle` operation by rearranging elements
    in a tensor of shape :math:`(*, C, H \times r, W \times r)` to a tensor of shape
    :math:`(*, C \times r^2, H, W)`, where r is a downscale factor.

    See the paper:
    `Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network`_
    by Shi et al. (2016) for more details.

    Args:
        downscale_factor (int): factor to decrease spatial resolution by

    Shape:
        - Input: :math:`(*, C_{in}, H_{in}, W_{in})`, where * is zero or more batch dimensions
        - Output: :math:`(*, C_{out}, H_{out}, W_{out})`, where

    .. math::
        C_{out} = C_{in} \times \text{downscale\_factor}^2

    .. math::
        H_{out} = H_{in} \div \text{downscale\_factor}

    .. math::
        W_{out} = W_{in} \div \text{downscale\_factor}

    Examples::

        >>> pixel_unshuffle = nn.PixelUnshuffle(3)
        >>> input = torch.randn(1, 1, 12, 12)
        >>> output = pixel_unshuffle(input)
        >>> print(output.size())
        torch.Size([1, 9, 4, 4])
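
    A minimal round-trip sketch (illustrative): for a matching factor,
    :class:`PixelUnshuffle` exactly inverts :class:`PixelShuffle`, since both
    are pure rearrangements of elements::

        >>> pixel_shuffle = nn.PixelShuffle(3)
        >>> pixel_unshuffle = nn.PixelUnshuffle(3)
        >>> input = torch.randn(1, 9, 4, 4)
        >>> print(torch.equal(input, pixel_unshuffle(pixel_shuffle(input))))
        True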

    .. _Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network:
        https://arxiv.org/abs/1609.05158
    """

    __constants__ = ["downscale_factor"]
    downscale_factor: int

    def __init__(self, downscale_factor: int) -> None:
        super().__init__()
        self.downscale_factor = downscale_factor

    def forward(self, input: Tensor) -> Tensor:
        return F.pixel_unshuffle(input, self.downscale_factor)

    def extra_repr(self) -> str:
        return f"downscale_factor={self.downscale_factor}"