
    קg~                        d dl Z d dlZd dlmZmZmZmZmZ d dlZd dl	m
Z
 d dlmZ ddlmZmZ ddlmZ ddlmZmZmZ dd	lmZ dd
lmZ ddlmZ ddlmZmZmZ ddl m!Z!m"Z" ddl#m$Z$ dee
ej%        j&        f         dee'ef         ddfdZ(dej%        j&        ddfdZ)de
ddfdZ*dej%        j&        ddfdZ+	 	 d1de
de,deeee'ef         df         deeee'ef         df         de
f
dZ-	 	 	 	 d2dej%        j&        dee$ee'ef         f         de,deedf         deeee'ef         df         deee$ee'ef         f                  deeee'ef         df         d e,de
fd!Z.	 	 d1dej%        j&        dee$ee'ef         f         de,deedf         deeee'ef         df         deeee'ef         df         de
fd"Z/	 	 d1dej%        j&        deeee'ef         df         deeee'ef         df         de
fd#Z0	 	 	 d3dej%        j&        dee$ee'ef         f         deedf         deeee'ef         df         deee$ee'ef         f                  deeee'ef         df         de
fd$Z1	 	 d1dej%        j&        dee$ee'ef         f         deedf         deeee'ef         df         deeee'ef         df         de
fd%Z2	 	 	 	 	 	 d4d'e
d(e,d)eeee'ef         df         d e,d*e,dee$ee'ef         df         deeee'ef         df         d+e,de
fd,Z3	 	 	 	 d5d'e
d)eeee'ef         df         d*e,dee$ee'ef         df         deeee'ef         df         de
fd-Z4	 	 	 	 d5d'e
d)eeee'ef         df         d*e,dee$ee'ef         df         deeee'ef         df         de
fd.Z5	 	 	 d3d'e
d)eeee'ef         df         dee$ee'ef         df         deeee'ef         df         de
f
d/Z6	 	 d6d'e
d(e,d)eeee'ef         df         de
fd0Z7dS )7    N)AnyDictOptionalTupleUnion)GraphModule)_USER_PRESERVED_ATTRIBUTES_KEY   )BackendConfigget_tensorrt_backend_config)convert)ConvertCustomConfigFuseCustomConfigPrepareCustomConfig)fuse)ObservedGraphModule)prepare)QuantizationTracerScopeScopeContextManager)get_custom_module_class_keys#get_skipped_module_name_and_classes)QConfigMappingmodelpreserved_attrsreturnc                     t          j         |          | j        t          <   | j        t                                                   D ]\  }}t	          | ||           dS )zXStore preserved attributes to the model.meta so that it can be preserved during deepcopyN)copymetar	   itemssetattr)r   r   	attr_nameattrs       ]/var/www/html/ai-engine/env/lib/python3.11/site-packages/torch/ao/quantization/quantize_fx.pyattach_preserved_attrs_to_modelr%      sc    
 26?1K1KEJ-. !:&DEKKMM ( (	4y$''''( (    c                     t          | t                    s2t          dt          t	          |                     z   dz   dz             d S )Nz,input model must be a GraphModule, Got type:z Please make zsure to follow the tutorials.)
isinstancer   
ValueErrorstrtype)r   s    r$   _check_is_graph_moduler,   #   s_    e[)) 
$u++  .	.
 
 	

 
r&   c                 R    | j         j        D ]}t          |d          si |_        dS )a  Attach meta field to all nodes of the graph if it does not exist,
    meta field is a field that stores some meta information about the node, such
    as dtype and shape information for output of the node, this only exists
    if the program is captured by make_fx (used in quantize_pt2e flow), if
    the program is captured by torch.fx symbolic tracing, this field may not exist,
    so we add it here to avoid checking this all over the places
    """
    for node in model.graph.nodes:
        if not hasattr(node, "meta"):
            node.meta = {}


def _swap_ff_with_fxff(model: torch.nn.Module) -> None:
    r"""Swap FloatFunctional with FXFloatFunctional"""
    modules_to_swap = []
    for name, module in model.named_children():
        if isinstance(module, torch.ao.nn.quantized.FloatFunctional):
            modules_to_swap.append(name)
        else:
            _swap_ff_with_fxff(module)

    for name in modules_to_swap:
        del model._modules[name]
        model._modules[name] = torch.ao.nn.quantized.FXFloatFunctional()


def _fuse_fx(
    model: GraphModule,
    is_qat: bool,
    fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r"""Internal helper function to fuse modules in preparation for quantization

    Args:
        model: GraphModule object from symbolic tracing (torch.fx.symbolic_trace)
    """
    _check_is_graph_module(model)
    return fuse(model, is_qat, fuse_custom_config, backend_config)


def _prepare_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
    is_qat: bool,
    example_inputs: Tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
    _equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
    is_standalone_module: bool = False,
) -> GraphModule:
    r""" Internal helper function for prepare_fx
        Args:
          `model`, `qconfig_mapping`, `prepare_custom_config`, `_equalization_config`:
          see docs for :func:`~torch.ao.quantization.prepare_fx`
          `is_standalone_module`: a boolean flag indicates whether we are
          quantizing a standalone module or not, a standalone module
          is a submodule of the parent module that is not inlined in the
          forward graph of the parent module,
          the way we quantize standalone module is described in:
          :func:`~torch.ao.quantization._prepare_standalone_module_fx`
    """
    if prepare_custom_config is None:
        prepare_custom_config = PrepareCustomConfig()
    if _equalization_config is None:
        _equalization_config = QConfigMapping()

    if isinstance(prepare_custom_config, dict):
        warnings.warn(
            "Passing a prepare_custom_config_dict to prepare is deprecated and will not be supported "
            "in a future version. Please pass in a PrepareCustomConfig instead."
        )
        prepare_custom_config = PrepareCustomConfig.from_dict(prepare_custom_config)

    # swap FloatFunctional with FXFloatFunctional
    _swap_ff_with_fxff(model)

    skipped_module_names, skipped_module_classes = get_skipped_module_name_and_classes(
        prepare_custom_config, is_standalone_module
    )
    preserved_attr_names = prepare_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(model, attr)
        for attr in preserved_attr_names
        if hasattr(model, attr)
    }
    # symbolically trace the model
    tracer = QuantizationTracer(skipped_module_names, skipped_module_classes)
    graph_module = GraphModule(model, tracer.trace(model))
    _attach_meta_to_node_if_not_exist(graph_module)

    fuse_custom_config = FuseCustomConfig().set_preserved_attributes(
        prepare_custom_config.preserved_attributes
    )
    graph_module = _fuse_fx(graph_module, is_qat, fuse_custom_config, backend_config)
    prepared = prepare(
        graph_module,
        qconfig_mapping,
        is_qat,
        tracer.node_name_to_scope,
        example_inputs=example_inputs,
        prepare_custom_config=prepare_custom_config,
        _equalization_config=_equalization_config,
        backend_config=backend_config,
        is_standalone_module=is_standalone_module,
    )

    attach_preserved_attrs_to_model(prepared, preserved_attrs)
    return prepared


def _prepare_standalone_module_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
    is_qat: bool,
    example_inputs: Tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r"""[Internal use only] Prepare a standalone module, so that it can be used when quantizing the
    parent module.
    standalone_module means it is a submodule that is not inlined in the parent module,
    and will be quantized separately as one unit.

    How the standalone module is observed is specified by `input_quantized_idxs` and
    `output_quantized_idxs` in the prepare_custom_config for the standalone module

    Returns:

        * model(GraphModule): prepared standalone module. It has these attributes in
          model.meta:

            * `standalone_module_input_quantized_idxs(List[Int])`: a list of
              indexes for the graph input that is expected to be quantized,
              same as input_quantized_idxs configuration provided
              for the standalone module
            * `standalone_module_output_quantized_idxs(List[Int])`: a list of
              indexes for the graph output that is quantized
              same as output_quantized_idxs configuration provided
              for the standalone module
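
    A minimal sketch of how a parent model would typically designate a standalone
    submodule before calling :func:`~torch.ao.quantization.prepare_fx` (the names
    ``float_model``, ``"sub"`` and ``standalone_example_inputs`` below are
    illustrative assumptions, not part of this function's API)::

        # assumes the set_standalone_module_name / set_input_quantized_indexes
        # methods of PrepareCustomConfig (torch.ao.quantization.fx.custom_config)
        standalone_prepare_config = PrepareCustomConfig().set_input_quantized_indexes([0])
        parent_prepare_config = PrepareCustomConfig().set_standalone_module_name(
            "sub",  # submodule of float_model that is kept as one unit
            qconfig_mapping,
            standalone_example_inputs,
            standalone_prepare_config,
            backend_config,
        )
        prepared = prepare_fx(
            float_model,
            qconfig_mapping,
            example_inputs,
            prepare_custom_config=parent_prepare_config,
        )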

    """
    return _prepare_fx(
        model,
        qconfig_mapping,
        is_qat,
        example_inputs,
        prepare_custom_config,
        backend_config=backend_config,
        is_standalone_module=True,
    )


def fuse_fx(
    model: torch.nn.Module,
    fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r"""Fuse modules like conv+bn, conv+bn+relu etc, model must be in eval mode.
    Fusion rules are defined in torch.ao.quantization.fx.fusion_pattern.py

    Args:

        * `model` (torch.nn.Module): a torch.nn.Module model
        * `fuse_custom_config` (FuseCustomConfig): custom configurations for fuse_fx.
            See :class:`~torch.ao.quantization.fx.custom_config.FuseCustomConfig` for more details
    Example::

        from torch.ao.quantization import fuse_fx
        m = Model().eval()
        m = fuse_fx(m)

    """
    if fuse_custom_config is None:
        fuse_custom_config = FuseCustomConfig()

    if isinstance(fuse_custom_config, dict):
        warnings.warn(
            "Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
            "in a future version. Please pass in a FuseCustomConfig instead."
        )
        fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)

    torch._C._log_api_usage_once("quantization_api.quantize_fx.fuse_fx")
    preserved_attr_names = fuse_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(model, attr)
        for attr in preserved_attr_names
        if hasattr(model, attr)
    }
    graph_module = torch.fx.symbolic_trace(model)
    _attach_meta_to_node_if_not_exist(graph_module)
    graph_module = _fuse_fx(graph_module, False, fuse_custom_config, backend_config)
    attach_preserved_attrs_to_model(graph_module, preserved_attrs)
    return graph_module


def prepare_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
    example_inputs: Tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
    _equalization_config: Optional[Union[QConfigMapping, Dict[str, Any]]] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r""" Prepare a model for post training quantization

    Args:
      * `model` (torch.nn.Module): torch.nn.Module model

      * `qconfig_mapping` (QConfigMapping): QConfigMapping object to configure how a model is
         quantized, see :class:`~torch.ao.quantization.qconfig_mapping.QConfigMapping`
         for more details

      * `example_inputs` (Tuple[Any, ...]): Example inputs for forward function of the model,
         Tuple of positional args (keyword args can be passed as positional args as well)

      * `prepare_custom_config` (PrepareCustomConfig): customization configuration for quantization tool.
          See :class:`~torch.ao.quantization.fx.custom_config.PrepareCustomConfig` for more details

      * `_equalization_config`: config for specifying how to perform equalization on the model

      * `backend_config` (BackendConfig): config that specifies how operators are quantized
         in a backend, this includes how the operators are observed,
         supported fusion patterns, how quantize/dequantize ops are
         inserted, supported dtypes etc. See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details

    Return:
      A GraphModule with observer (configured by qconfig_mapping), ready for calibration

    Example::

        import torch
        from torch.ao.quantization import get_default_qconfig_mapping
        from torch.ao.quantization.quantize_fx import prepare_fx

        class Submodule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
            def forward(self, x):
                x = self.linear(x)
                return x

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
                self.sub = Submodule()

            def forward(self, x):
                x = self.linear(x)
                x = self.sub(x) + x
                return x

        # initialize a floating point model
        float_model = M().eval()

        # define calibration function
        def calibrate(model, data_loader):
            model.eval()
            with torch.no_grad():
                for image, target in data_loader:
                    model(image)

        # qconfig is the configuration for how we insert observers for a particular
        # operator
        # qconfig = get_default_qconfig("fbgemm")
        # Example of customizing qconfig:
        # qconfig = torch.ao.quantization.QConfig(
        #    activation=MinMaxObserver.with_args(dtype=torch.qint8),
        #    weight=MinMaxObserver.with_args(dtype=torch.qint8))
        # `activation` and `weight` are constructors of observer module

        # qconfig_mapping is a collection of quantization configurations, user can
        # set the qconfig for each operator (torch op calls, functional calls, module calls)
        # in the model through qconfig_mapping
        # the following call will get the qconfig_mapping that works best for models
        # that target "fbgemm" backend
        qconfig_mapping = get_default_qconfig_mapping("fbgemm")

        # We can customize qconfig_mapping in different ways.
        # e.g. set the global qconfig, which means we will use the same qconfig for
        # all operators in the model, this can be overwritten by other settings
        # qconfig_mapping = QConfigMapping().set_global(qconfig)
        # e.g. quantize the linear submodule with a specific qconfig
        # qconfig_mapping = QConfigMapping().set_module_name("linear", qconfig)
        # e.g. quantize all nn.Linear modules with a specific qconfig
        # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
        # for a more complete list, please see the docstring for :class:`torch.ao.quantization.QConfigMapping`
        # argument

        # example_inputs is a tuple of inputs, that is used to infer the type of the
        # outputs in the model
        # currently it's not used, but please make sure model(*example_inputs) runs
        example_inputs = (torch.randn(1, 3, 224, 224),)

        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        # `prepare_fx` inserts observers in the model based on qconfig_mapping and
        # backend_config. If the configuration for an operator in qconfig_mapping
        # is supported in the backend_config (meaning it's supported by the target
        # hardware), we'll insert observer modules according to the qconfig_mapping
        # otherwise the configuration in qconfig_mapping will be ignored
        #
        # Example:
        # in qconfig_mapping, user sets linear module to be quantized with quint8 for
        # activation and qint8 for weight:
        # qconfig = torch.ao.quantization.QConfig(
        #     observer=MinMaxObserver.with_args(dtype=torch.quint8),
        #     weight=MinMaxObserver.with_args(dtype=torch.qint8))
        # Note: current qconfig api does not support setting output observer, but
        # we may extend this to support these more fine grained control in the
        # future
        #
        # qconfig_mapping = QConfigMapping().set_object_type(torch.nn.Linear, qconfig)
        # in backend config, linear module also supports in this configuration:
        # weighted_int8_dtype_config = DTypeConfig(
        #   input_dtype=torch.quint8,
        #   output_dtype=torch.quint8,
        #   weight_dtype=torch.qint8,
        #   bias_type=torch.float)

        # linear_pattern_config = BackendPatternConfig(torch.nn.Linear) \
        #    .set_observation_type(ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT) \
        #    .add_dtype_config(weighted_int8_dtype_config) \
        #    ...

        # backend_config = BackendConfig().set_backend_pattern_config(linear_pattern_config)
        # `prepare_fx` will check that the setting requested by user in qconfig_mapping
        # is supported by the backend_config and insert observers and fake quant modules
        # in the model
        prepared_model = prepare_fx(float_model, qconfig_mapping, example_inputs)
        # Run calibration
        calibrate(prepared_model, sample_inference_data)
    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_fx")
    return _prepare_fx(
        model,
        qconfig_mapping,
        False,  # is_qat
        example_inputs,
        prepare_custom_config,
        _equalization_config,
        backend_config,
    )


def prepare_qat_fx(
    model: torch.nn.Module,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any]],
    example_inputs: Tuple[Any, ...],
    prepare_custom_config: Union[PrepareCustomConfig, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r""" Prepare a model for quantization aware training

    Args:
      * `model` (torch.nn.Module): torch.nn.Module model
      * `qconfig_mapping` (QConfigMapping): see :func:`~torch.ao.quantization.prepare_fx`
      * `example_inputs` (Tuple[Any, ...]): see :func:`~torch.ao.quantization.prepare_fx`
      * `prepare_custom_config` (PrepareCustomConfig): see :func:`~torch.ao.quantization.prepare_fx`
      * `backend_config` (BackendConfig): see :func:`~torch.ao.quantization.prepare_fx`

    Return:
      A GraphModule with fake quant modules (configured by qconfig_mapping and backend_config), ready for
      quantization aware training

    Example::

        import torch
        from torch.ao.quantization import get_default_qat_qconfig_mapping
        from torch.ao.quantization.quantize_fx import prepare_qat_fx

        class Submodule(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
            def forward(self, x):
                x = self.linear(x)
                return x

        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(5, 5)
                self.sub = Submodule()

            def forward(self, x):
                x = self.linear(x)
                x = self.sub(x) + x
                return x

        # initialize a floating point model
        float_model = M().train()
        # (optional, but preferred) load the weights from pretrained model
        # float_model.load_weights(...)

        # define the training loop for quantization aware training
        def train_loop(model, train_data):
            model.train()
            for image, target in train_data:
                ...

        # qconfig is the configuration for how we insert observers for a particular
        # operator
        # qconfig = get_default_qconfig("fbgemm")
        # Example of customizing qconfig:
        # qconfig = torch.ao.quantization.QConfig(
        #    activation=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)),
        #    weight=FakeQuantize.with_args(observer=MinMaxObserver.with_args(dtype=torch.qint8)))
        # `activation` and `weight` are constructors of observer module

        # qconfig_mapping is a collection of quantization configurations, user can
        # set the qconfig for each operator (torch op calls, functional calls, module calls)
        # in the model through qconfig_mapping
        # the following call will get the qconfig_mapping that works best for models
        # that target "fbgemm" backend
        qconfig_mapping = get_default_qat_qconfig_mapping("fbgemm")

        # We can customize qconfig_mapping in different ways, please take a look at
        # the docstring for :func:`~torch.ao.quantization.prepare_fx` for different ways
        # to configure this

        # example_inputs is a tuple of inputs, that is used to infer the type of the
        # outputs in the model
        # currently it's not used, but please make sure model(*example_inputs) runs
        example_inputs = (torch.randn(1, 3, 224, 224),)

        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        # `prepare_qat_fx` inserts observers in the model based on qconfig_mapping and
        # backend_config, if the configuration for an operator in qconfig_mapping
        # is supported in the backend_config (meaning it's supported by the target
        # hardware), we'll insert fake_quantize modules according to the qconfig_mapping
        # otherwise the configuration in qconfig_mapping will be ignored
        # see :func:`~torch.ao.quantization.prepare_fx` for a detailed explanation of
        # how qconfig_mapping interacts with backend_config
        prepared_model = prepare_qat_fx(float_model, qconfig_mapping, example_inputs)
        # Run training
        train_loop(prepared_model, sample_training_data)

    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.prepare_qat_fx")
    return _prepare_fx(
        model,
        qconfig_mapping,
        True,  # is_qat
        example_inputs,
        prepare_custom_config,
        backend_config=backend_config,
    )


def _convert_fx(
    graph_module: GraphModule,
    is_reference: bool,
    convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
    is_standalone_module: bool = False,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
    is_decomposed: bool = False,
) -> GraphModule:
    """`is_standalone_module`: see docs in :func:`~torch.ao.quantization.prepare_standalone_module_fx`"""
    if convert_custom_config is None:
        convert_custom_config = ConvertCustomConfig()

    if isinstance(convert_custom_config, dict):
        warnings.warn(
            "Passing a convert_custom_config_dict to convert is deprecated and will not be supported "
            "in a future version. Please pass in a ConvertCustomConfig instead."
        )
        convert_custom_config = ConvertCustomConfig.from_dict(convert_custom_config)

    _check_is_graph_module(graph_module)
    preserved_attr_names = convert_custom_config.preserved_attributes
    preserved_attrs = {
        attr: getattr(graph_module, attr)
        for attr in preserved_attr_names
        if hasattr(graph_module, attr)
    }

    quantized = convert(
        graph_module,
        is_reference,
        convert_custom_config,
        is_standalone_module,
        _remove_qconfig_flag=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
        is_decomposed=is_decomposed,
    )

    attach_preserved_attrs_to_model(quantized, preserved_attrs)
    return quantized


def convert_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r""" Convert a calibrated or trained model to a quantized model

    Args:
        * `graph_module` (torch.fx.GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :class:`~torch.ao.quantization.fx.custom_config.ConvertCustomConfig` for more details

        * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.

           The keys must include the ones in the qconfig_mapping passed to `prepare_fx` or `prepare_qat_fx`,
           with the same values or `None`. Additional keys can be specified with values set to `None`.

          For each entry whose value is set to None, we skip quantizing that entry in the model::

            qconfig_mapping = QConfigMapping
                .set_global(qconfig_from_prepare)
                .set_object_type(torch.nn.functional.add, None)  # skip quantizing torch.nn.functional.add
                .set_object_type(torch.nn.functional.linear, qconfig_from_prepare)
                .set_module_name("foo.bar", None)  # skip quantizing module "foo.bar"

         * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend, this includes quantization
            mode support (static/dynamic/weight_only), dtype support (quint8/qint8 etc.),
            observer placement for each operators and fused operators.
            See :class:`~torch.ao.quantization.backend_config.BackendConfig` for more details

    Return:
        A quantized model (torch.nn.Module)

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # convert_fx converts a calibrated/trained model to a quantized model for the
        # target hardware, this includes converting the model first to a reference
        # quantized model, and then lower the reference quantized model to a backend
        # Currently, the supported backends are fbgemm (onednn), qnnpack (xnnpack) and
        # they share the same set of quantized operators, so we are using the same
        # lowering procedure
        #
        # backend_config defines the corresponding reference quantized module for
        # the weighted modules in the model, e.g. nn.Linear
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        quantized_model = convert_fx(prepared_model)

    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_fx")
    return _convert_fx(
        graph_module,
        is_reference=False,
        convert_custom_config=convert_custom_config,
        _remove_qconfig=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
    )


def convert_to_reference_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
    _remove_qconfig: bool = True,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r"""Convert a calibrated or trained model to a reference quantized model,
    see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details,
    reference quantized model is a standard representation of a quantized model provided
    by FX Graph Mode Quantization, it can be further lowered to run on the target
    hardware, like accelerators

    Args:
        * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

         * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend. See
            :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

    Return:
        A reference quantized model (GraphModule)

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        reference_quantized_model = convert_to_reference_fx(prepared_model)

    """
    torch._C._log_api_usage_once("quantization_api.quantize_fx.convert_to_reference_fx")
    return _convert_fx(
        graph_module,
        is_reference=True,
        convert_custom_config=convert_custom_config,
        _remove_qconfig=_remove_qconfig,
        qconfig_mapping=qconfig_mapping,
        backend_config=backend_config,
    )


def _convert_to_reference_decomposed_fx(
    graph_module: GraphModule,
    convert_custom_config: Union[ConvertCustomConfig, Dict[str, Any], None] = None,
    qconfig_mapping: Union[QConfigMapping, Dict[str, Any], None] = None,
    backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
    r"""Convert a calibrated or trained model to a reference quantized model, with
    decomposed representation for quantized Tensor
    see https://github.com/pytorch/rfcs/blob/master/RFC-0019-Extending-PyTorch-Quantization-to-Custom-Backends.md for more details,
    reference quantized model is a standard representation of a quantized model provided
    by FX Graph Mode Quantization, it can be further lowered to run on the target
    hardware, like accelerators

    Note: this is not public API

    Args:
        * `graph_module` (GraphModule): A prepared and calibrated/trained model (GraphModule)

        * `convert_custom_config` (ConvertCustomConfig): custom configurations for convert function.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

        * `_remove_qconfig` (bool): Option to remove the qconfig attributes in the model after convert.

        * `qconfig_mapping` (QConfigMapping): config for specifying how to convert a model for quantization.
            See :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

         * `backend_config` (BackendConfig): A configuration for the backend which describes how
            operators should be quantized in the backend. See
            :func:`~torch.ao.quantization.quantize_fx.convert_fx` for more details.

    Return:
        A reference quantized model (GraphModule) with operators working with decomposed quantized Tensor

    Example::

        # prepared_model: the model after prepare_fx/prepare_qat_fx and calibration/training
        # TODO: add backend_config after we split the backend_config for fbgemm and qnnpack
        # e.g. backend_config = get_default_backend_config("fbgemm")
        reference_quantized_model = _convert_to_reference_decomposed_fx(prepared_model)

    z@quantization_api.quantize_fx._convert_to_reference_decomposed_fxTF)rv   rw   rx   rF   rC   ry   r   )rd   rw   rF   rC   s       r$   #_convert_to_reference_decomposed_fxr     sP    R 
H  J   3'%   r&   c                 (    t          | ||d          S )av  [Internal use only] Convert a model produced by :func:`~torch.ao.quantization.prepare_standalone_module_fx`
    and convert it to a quantized model

    Returns a quantized standalone module, whether input/output is quantized is
    specified by prepare_custom_config, with
    input_quantized_idxs, output_quantized_idxs, please
    see docs for prepare_fx for details
    """
    return _convert_fx(
        graph_module,
        is_reference,
        convert_custom_config,
        is_standalone_module=True,
    )