from functools import partial
from typing import Any, Optional

from torch import nn

from ...transforms._presets import SemanticSegmentation
from .._api import register_model, Weights, WeightsEnum
from .._meta import _VOC_CATEGORIES
from .._utils import _ovewrite_value_param, handle_legacy_interface, IntermediateLayerGetter
from ..resnet import ResNet, resnet101, ResNet101_Weights, resnet50, ResNet50_Weights
from ._utils import _SimpleSegmentationModel


__all__ = ["FCN", "FCN_ResNet50_Weights", "FCN_ResNet101_Weights", "fcn_resnet50", "fcn_resnet101"]


class FCN(_SimpleSegmentationModel):
    """
    Implements FCN model from
    `"Fully Convolutional Networks for Semantic Segmentation"
    <https://arxiv.org/abs/1411.4038>`_.

    Args:
        backbone (nn.Module): the network used to compute the features for the model.
            The backbone should return an OrderedDict[Tensor], with the key being
            "out" for the last feature map used, and "aux" if an auxiliary classifier
            is used.
        classifier (nn.Module): module that takes the "out" element returned from
            the backbone and returns a dense prediction.
        aux_classifier (nn.Module, optional): auxiliary classifier used during training
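
    Example (an illustrative sketch of the backbone contract above, using the same
    dilated ResNet-50 trunk as the pre-built constructors; any module returning such a
    dict works, and ``FCNHead``/``IntermediateLayerGetter`` are internal helpers used
    here only to keep the sketch short)::

        >>> import torch
        >>> from torchvision.models import resnet50
        >>> from torchvision.models._utils import IntermediateLayerGetter
        >>> trunk = resnet50(replace_stride_with_dilation=[False, True, True])
        >>> # expose "layer4" as "out" and "layer3" as "aux" so the backbone returns the expected keys
        >>> backbone = IntermediateLayerGetter(trunk, return_layers={"layer4": "out", "layer3": "aux"})
        >>> model = FCN(backbone, FCNHead(2048, 21), aux_classifier=FCNHead(1024, 21))
        >>> out = model(torch.rand(1, 3, 224, 224))
        >>> list(out.keys()), out["out"].shape
        (['out', 'aux'], torch.Size([1, 21, 224, 224]))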
    """

    pass


class FCNHead(nn.Sequential):
    def __init__(self, in_channels: int, channels: int) -> None:
        inter_channels = in_channels // 4
        layers = [
            nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Conv2d(inter_channels, channels, 1),
        ]

        super().__init__(*layers)


_COMMON_META = {
    "categories": _VOC_CATEGORIES,
    "min_size": (1, 1),
    "_docs": """
        These weights were trained on a subset of COCO, using only the 20 categories that are present in the Pascal VOC
        dataset.
    """,
}


class FCN_ResNet50_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 35322218,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#fcn_resnet50",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 60.5,
                    "pixel_acc": 91.4,
                }
            },
            "_ops": 152.717,
            "_file_size": 135.009,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


class FCN_ResNet101_Weights(WeightsEnum):
    COCO_WITH_VOC_LABELS_V1 = Weights(
        url="https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth",
        transforms=partial(SemanticSegmentation, resize_size=520),
        meta={
            **_COMMON_META,
            "num_params": 54314346,
            "recipe": "https://github.com/pytorch/vision/tree/main/references/segmentation#deeplabv3_resnet101",
            "_metrics": {
                "COCO-val2017-VOC-labels": {
                    "miou": 63.7,
                    "pixel_acc": 91.9,
                }
            },
            "_ops": 232.738,
            "_file_size": 207.711,
        },
    )
    DEFAULT = COCO_WITH_VOC_LABELS_V1


def _fcn_resnet(
    backbone: ResNet,
    num_classes: int,
    aux: Optional[bool],
) -> FCN:
    return_layers = {"layer4": "out"}
    if aux:
        return_layers["layer3"] = "aux"
    backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)

    aux_classifier = FCNHead(1024, num_classes) if aux else None
    classifier = FCNHead(2048, num_classes)
    return FCN(backbone, classifier, aux_classifier)


@register_model()
@handle_legacy_interface(
    weights=("pretrained", FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet50_Weights.IMAGENET1K_V1),
)
def fcn_resnet50(
    *,
    weights: Optional[FCN_ResNet50_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet50_Weights] = ResNet50_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> FCN:
    """Fully-Convolutional Network model with a ResNet-50 backbone from the `Fully Convolutional
    Networks for Semantic Segmentation <https://arxiv.org/abs/1411.4038>`_ paper.

    .. betastatus:: segmentation module

    Args:
        weights (:class:`~torchvision.models.segmentation.FCN_ResNet50_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.FCN_ResNet50_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background).
        aux_loss (bool, optional): If True, it uses an auxiliary loss.
        weights_backbone (:class:`~torchvision.models.ResNet50_Weights`, optional): The pretrained
            weights for the backbone.
        **kwargs: parameters passed to the ``torchvision.models.segmentation.fcn.FCN``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.segmentation.FCN_ResNet50_Weights
        :members:
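
    Example (a minimal inference sketch; ``img`` is assumed to be a ``PIL.Image`` or a
    3-channel image tensor you already have, and the first call downloads the
    pre-trained weights)::

        >>> weights = FCN_ResNet50_Weights.COCO_WITH_VOC_LABELS_V1
        >>> model = fcn_resnet50(weights=weights).eval()
        >>> preprocess = weights.transforms()
        >>> batch = preprocess(img).unsqueeze(0)
        >>> prediction = model(batch)["out"]         # logits, shape (1, 21, H, W)
        >>> mask = prediction.argmax(dim=1)          # per-pixel class indices
        >>> categories = weights.meta["categories"]  # maps indices to class names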
    """
    weights = FCN_ResNet50_Weights.verify(weights)
    weights_backbone = ResNet50_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet50(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _fcn_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


@register_model()
@handle_legacy_interface(
    weights=("pretrained", FCN_ResNet101_Weights.COCO_WITH_VOC_LABELS_V1),
    weights_backbone=("pretrained_backbone", ResNet101_Weights.IMAGENET1K_V1),
)
def fcn_resnet101(
    *,
    weights: Optional[FCN_ResNet101_Weights] = None,
    progress: bool = True,
    num_classes: Optional[int] = None,
    aux_loss: Optional[bool] = None,
    weights_backbone: Optional[ResNet101_Weights] = ResNet101_Weights.IMAGENET1K_V1,
    **kwargs: Any,
) -> FCN:
    """Fully-Convolutional Network model with a ResNet-101 backbone from the `Fully Convolutional
    Networks for Semantic Segmentation <https://arxiv.org/abs/1411.4038>`_ paper.

    .. betastatus:: segmentation module

    Args:
        weights (:class:`~torchvision.models.segmentation.FCN_ResNet101_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.segmentation.FCN_ResNet101_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        num_classes (int, optional): number of output classes of the model (including the background).
        aux_loss (bool, optional): If True, it uses an auxiliary loss.
        weights_backbone (:class:`~torchvision.models.ResNet101_Weights`, optional): The pretrained
            weights for the backbone.
        **kwargs: parameters passed to the ``torchvision.models.segmentation.fcn.FCN``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/segmentation/fcn.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.segmentation.FCN_ResNet101_Weights
        :members:
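
    Example (an illustrative fine-tuning sketch for a 3-class dataset; ``images`` and
    ``targets`` are assumed to come from your own dataloader, and the ``0.5`` weight on
    the auxiliary loss follows the torchvision segmentation reference scripts)::

        >>> import torch.nn.functional as F
        >>> model = fcn_resnet101(weights_backbone=ResNet101_Weights.IMAGENET1K_V1, num_classes=3, aux_loss=True)
        >>> outputs = model(images)                  # {"out": ..., "aux": ...}
        >>> loss = F.cross_entropy(outputs["out"], targets, ignore_index=255)
        >>> loss = loss + 0.5 * F.cross_entropy(outputs["aux"], targets, ignore_index=255)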
    """
    weights = FCN_ResNet101_Weights.verify(weights)
    weights_backbone = ResNet101_Weights.verify(weights_backbone)

    if weights is not None:
        weights_backbone = None
        num_classes = _ovewrite_value_param("num_classes", num_classes, len(weights.meta["categories"]))
        aux_loss = _ovewrite_value_param("aux_loss", aux_loss, True)
    elif num_classes is None:
        num_classes = 21

    backbone = resnet101(weights=weights_backbone, replace_stride_with_dilation=[False, True, True])
    model = _fcn_resnet(backbone, num_classes, aux_loss)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model
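

# A quick, self-contained sanity check of the builders above (illustrative sketch only;
# run it as ``python -m torchvision.models.segmentation.fcn`` because of the relative
# imports): builds an untrained model without downloading any weights and prints the
# output dict described in the FCN docstring.
if __name__ == "__main__":
    import torch

    demo = fcn_resnet50(weights=None, weights_backbone=None, num_classes=21, aux_loss=True)
    demo.eval()
    with torch.no_grad():
        result = demo(torch.rand(1, 3, 256, 256))
    print({name: tuple(tensor.shape) for name, tensor in result.items()})
    # expected: {'out': (1, 21, 256, 256), 'aux': (1, 21, 256, 256)}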