""" PyTorch MADGRAD optimizer

MADGRAD: https://arxiv.org/abs/2101.11075

Code from: https://github.com/facebookresearch/madgrad
"""
import math
from typing import TYPE_CHECKING, Any, Callable, Optional

import torch
import torch.optim

if TYPE_CHECKING:
    from torch.optim.optimizer import _params_t
else:
    _params_t = Any


class MADGRAD(torch.optim.Optimizer):
    """
    MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic
    Optimization.

    .. _MADGRAD: https://arxiv.org/abs/2101.11075

    MADGRAD is a general purpose optimizer that can be used in place of SGD or
    Adam, and may converge faster and generalize better. Currently GPU-only.
    Typically, the same learning rate schedule that is used for SGD or Adam may
    be used. The overall learning rate is not comparable to either method and
    should be determined by a hyper-parameter sweep.

    MADGRAD requires less weight decay than other methods, often as little as
    zero. Momentum values used for SGD or Adam's beta1 should work here also.

    On sparse problems both weight_decay and momentum should be set to 0.

    Arguments:
        params (iterable):
            Iterable of parameters to optimize or dicts defining parameter groups.
        lr (float):
            Learning rate (default: 1e-2).
        momentum (float):
            Momentum value in the range [0,1) (default: 0.9).
        weight_decay (float):
            Weight decay, i.e. an L2 penalty (default: 0).
        eps (float):
            Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6).
    """

    def __init__(
            self,
            params: _params_t,
            lr: float = 1e-2,
            momentum: float = 0.9,
            weight_decay: float = 0,
            eps: float = 1e-6,
            decoupled_decay: bool = False,
    ):
        if momentum < 0 or momentum >= 1:
            raise ValueError(f"Momentum {momentum} must be in the range [0,1]")
        if lr <= 0:
            raise ValueError(f"Learning rate {lr} must be positive")
        if weight_decay < 0:
            raise ValueError(f"Weight decay {weight_decay} must be non-negative")
        if eps < 0:
            raise ValueError("Eps must be non-negative")

        defaults = dict(
            lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay)
        super().__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self) -> bool:
        return False

    @property
    def supports_flat_params(self) -> bool:
        return True

    @torch.no_grad()
    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            eps = group['eps']
            lr = group['lr'] + eps
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            ck = 1 - momentum

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad
                if momentum != 0.0 and grad.is_sparse:
                    raise RuntimeError("momentum != 0 is not compatible with sparse gradients")

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['grad_sum_sq'] = torch.zeros_like(p)
                    state['s'] = torch.zeros_like(p)
                    if momentum != 0:
                        state['x0'] = torch.clone(p).detach()

                state['step'] += 1
                grad_sum_sq = state['grad_sum_sq']
                s = state['s']
                lamb = lr * math.sqrt(state['step'])

                # Apply weight decay
                if weight_decay != 0:
                    if group['decoupled_decay']:
                        p.mul_(1.0 - group['lr'] * weight_decay)
                    else:
                        if grad.is_sparse:
                            raise RuntimeError("weight_decay option is not compatible with sparse gradients")
                        grad.add_(p, alpha=weight_decay)

                if grad.is_sparse:
                    grad = grad.coalesce()
                    grad_val = grad._values()

                    p_masked = p.sparse_mask(grad)
                    grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad)
                    s_masked = s.sparse_mask(grad)

                    # Compute x_0 from other known quantities
                    rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps)
                    x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1)

                    # Dense + sparse op
                    grad_sq = grad * grad
                    grad_sum_sq.add_(grad_sq, alpha=lamb)
                    grad_sum_sq_masked.add_(grad_sq, alpha=lamb)

                    rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps)

                    s.add_(grad, alpha=lamb)
                    s_masked._values().add_(grad_val, alpha=lamb)

                    # Update masked copy of p
                    p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1)
                    # Copy updated masked p to dense p using an add operation
                    p_masked._values().add_(p_kp1_masked_vals, alpha=-1)
                    p.add_(p_masked, alpha=-1)
                else:
                    if momentum == 0:
                        # Compute x_0 from other known quantities
                        rms = grad_sum_sq.pow(1 / 3).add_(eps)
                        x0 = p.addcdiv(s, rms, value=1)
                    else:
                        x0 = state['x0']

                    # Accumulate second moments
                    grad_sum_sq.addcmul_(grad, grad, value=lamb)
                    rms = grad_sum_sq.pow(1 / 3).add_(eps)

                    # Update s
                    s.add_(grad, alpha=lamb)

                    # Step
                    if momentum == 0:
                        p.copy_(x0.addcdiv(s, rms, value=-1))
                    else:
                        z = x0.addcdiv(s, rms, value=-1)

                        # p is a moving average of z
                        p.mul_(1 - ck).add_(z, alpha=ck)

        return loss