
"""PyTorch optimization for BERT model."""

import math
import warnings
from functools import partial
from typing import Callable, Iterable, Optional, Tuple, Union

import torch
from torch import nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau

from .trainer_pt_utils import LayerWiseDummyOptimizer, LayerWiseDummyScheduler
from .trainer_utils import SchedulerType
from .utils import logging
from .utils.versions import require_version


logger = logging.get_logger(__name__)


def _get_constant_lambda(_=None):
    return 1


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
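
    Example (an illustrative sketch; the tiny model and learning rate are arbitrary placeholders, not recommended
    settings):

    ```python
    import torch

    from transformers.optimization import get_constant_schedule

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_constant_schedule(optimizer)

    optimizer.step()
    scheduler.step()  # the learning rate stays at 1e-3 on every step
    ```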
    """
    return LambdaLR(optimizer, _get_constant_lambda, last_epoch=last_epoch)
KKKKr   c                     t          | fi |S )a  
    Create a schedule with a constant learning rate that decreases when a metric has stopped improving.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        kwargs (`dict`, *optional*):
            Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau`
            for possible parameters.

    Return:
        `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule.
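
    Example (an illustrative sketch; `patience`, `factor`, and the fake evaluation losses below are arbitrary
    placeholders):

    ```python
    import torch

    from transformers.optimization import get_reduce_on_plateau_schedule

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = get_reduce_on_plateau_schedule(optimizer, patience=2, factor=0.5)

    for fake_eval_loss in [1.0, 1.0, 1.0, 1.0, 1.0]:
        optimizer.step()
        scheduler.step(fake_eval_loss)  # lr is halved once the metric stops improving for `patience` evaluations

    print(optimizer.param_groups[0]["lr"])
    ```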
    """
    return ReduceLROnPlateau(optimizer, **kwargs)


def _get_constant_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1.0, num_warmup_steps))
    return 1.0


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
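
    Example (an illustrative sketch; the warmup length, model, and learning rate are arbitrary placeholders):

    ```python
    import torch

    from transformers.optimization import get_constant_schedule_with_warmup

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=10)

    for _ in range(20):
        optimizer.step()  # a real loop would compute a loss and call backward() first
        scheduler.step()
    # the lr ramps linearly from 0 to 1e-3 over the first 10 steps, then stays constant
    ```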
    """
    lr_lambda = partial(_get_constant_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps)
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def _get_linear_schedule_with_warmup_lr_lambda(current_step: int, *, num_warmup_steps: int, num_training_steps: int):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
    a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
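
    Example (an illustrative sketch; the tiny model and the 10/100 step counts are arbitrary placeholders):

    ```python
    import torch

    from transformers.optimization import get_linear_schedule_with_warmup

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

    lrs = []
    for _ in range(100):
        optimizer.step()  # a real loop would compute a loss and call backward() first
        scheduler.step()
        lrs.append(scheduler.get_last_lr()[0])
    # lrs rises linearly for 10 steps, then decays linearly toward 0 at step 100
    ```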
    """
    lr_lambda = partial(
        _get_linear_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_cosine_schedule_with_warmup_lr_lambda(
    current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
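
    Example (an illustrative sketch; the hyperparameters below are arbitrary placeholders):

    ```python
    import torch

    from transformers.optimization import get_cosine_schedule_with_warmup

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)

    for _ in range(100):
        optimizer.step()
        scheduler.step()  # after the warmup, the lr follows half a cosine wave from 1e-3 down to 0
    ```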
    """
    lr_lambda = partial(
        _get_cosine_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda(
    current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: int
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
    if progress >= 1.0:
        return 0.0
    return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
    linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`int`, *optional*, defaults to 1):
            The number of hard restarts to use.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
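
    Example (an illustrative sketch; two restarts over 100 steps is an arbitrary choice):

    ```python
    import torch

    from transformers.optimization import get_cosine_with_hard_restarts_schedule_with_warmup

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_cosine_with_hard_restarts_schedule_with_warmup(
        optimizer, num_warmup_steps=10, num_training_steps=100, num_cycles=2
    )

    for _ in range(100):
        optimizer.step()
        scheduler.step()  # the cosine decay restarts from the full lr twice after the warmup
    ```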
    """
    lr_lambda = partial(
        _get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_polynomial_decay_schedule_with_warmup_lr_lambda(
    current_step: int,
    *,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float,
    power: float,
    lr_init: float,
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    elif current_step > num_training_steps:
        return lr_end / lr_init  # as LambdaLR multiplies by lr_init
    else:
        lr_range = lr_init - lr_end
        decay_steps = num_training_steps - num_warmup_steps
        pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
        decay = lr_range * pct_remaining**power + lr_end
        return decay / lr_init  # as LambdaLR multiplies by lr_init


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """
    Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
    optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        lr_end (`float`, *optional*, defaults to 1e-7):
            The end LR.
        power (`float`, *optional*, defaults to 1.0):
            Power factor.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
    implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    lr_lambda = partial(
        _get_polynomial_decay_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        lr_end=lr_end,
        power=power,
        lr_init=lr_init,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_inverse_sqrt_schedule_lr_lambda(current_step: int, *, num_warmup_steps: int, timescale: int = None):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    shift = timescale - num_warmup_steps
    decay = 1.0 / math.sqrt((current_step + shift) / timescale)
    return decay


def get_inverse_sqrt_schedule(
    optimizer: Optimizer, num_warmup_steps: int, timescale: int = None, last_epoch: int = -1
):
    """
    Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
    warmup period which increases lr linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        timescale (`int`, *optional*, defaults to `num_warmup_steps`):
            Time scale.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    # Note: this schedule is adapted from Google's `big_vision` codebase.
    if timescale is None:
        timescale = num_warmup_steps or 10_000

    lr_lambda = partial(_get_inverse_sqrt_schedule_lr_lambda, num_warmup_steps=num_warmup_steps, timescale=timescale)
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def _get_cosine_schedule_with_warmup_lr_lambda(
    current_step: int, *, num_warmup_steps: int, num_training_steps: int, num_cycles: float, min_lr_rate: float = 0.0
):
    # NOTE: this redefinition (adding `min_lr_rate`) shadows the helper defined above; the LambdaLR partials
    # created earlier already bound the original function object, so they are unaffected.
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
    factor = 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))
    factor = factor * (1 - min_lr_rate) + min_lr_rate
    return max(0, factor)


def get_cosine_with_min_lr_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
    min_lr: float = None,
    min_lr_rate: float = None,
):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.
        min_lr (`float`, *optional*):
            The minimum learning rate to reach after the cosine schedule.
        min_lr_rate (`float`, *optional*):
            The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    if min_lr is not None and min_lr_rate is not None:
        raise ValueError("Only one of min_lr or min_lr_rate should be set")
    elif min_lr is not None:
        min_lr_rate = min_lr / optimizer.defaults["lr"]
    elif min_lr_rate is None:
        raise ValueError("One of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`")

    lr_lambda = partial(
        _get_cosine_schedule_with_warmup_lr_lambda,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        num_cycles=num_cycles,
        min_lr_rate=min_lr_rate,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


def _get_wsd_scheduler_lambda(
    current_step: int,
    *,
    num_warmup_steps: int,
    num_stable_steps: int,
    num_decay_steps: int,
    num_cycles: float,
    min_lr_ratio: float,
):
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    if current_step < num_warmup_steps + num_stable_steps:
        return 1.0
    if current_step < num_warmup_steps + num_stable_steps + num_decay_steps:
        progress = float(current_step - num_warmup_steps - num_stable_steps) / float(max(1, num_decay_steps))
        value = max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
        return (1.0 - min_lr_ratio) * value + min_lr_ratio
    return min_lr_ratio


def get_wsd_schedule(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_stable_steps: int,
    num_decay_steps: int,
    min_lr_ratio: float = 0,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
):
    """
    Create a schedule with a learning rate that has three stages:
    1. linear increase from 0 to initial lr.
    2. constant lr (equal to initial lr).
    3. decrease following the values of the cosine function between the initial lr set in the optimizer to
       a fraction of initial lr.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_stable_steps (`int`):
            The number of steps for the stable phase.
        num_decay_steps (`int`):
            The number of steps for the cosine annealing phase.
        min_lr_ratio (`float`, *optional*, defaults to 0):
            The minimum learning rate as a ratio of the initial learning rate.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the default is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
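
    Example (an illustrative sketch; the 10/60/30 warmup/stable/decay split is an arbitrary placeholder):

    ```python
    import torch

    from transformers.optimization import get_wsd_schedule

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_wsd_schedule(optimizer, num_warmup_steps=10, num_stable_steps=60, num_decay_steps=30)

    for _ in range(100):
        optimizer.step()
        scheduler.step()  # 10 warmup steps, 60 steps held at 1e-3, then a 30-step cosine decay
    ```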
    """
    lr_lambda = partial(
        _get_wsd_scheduler_lambda,
        num_warmup_steps=num_warmup_steps,
        num_stable_steps=num_stable_steps,
        num_decay_steps=num_decay_steps,
        min_lr_ratio=min_lr_ratio,
        num_cycles=num_cycles,
    )
    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.INVERSE_SQRT: get_inverse_sqrt_schedule,
    SchedulerType.REDUCE_ON_PLATEAU: get_reduce_on_plateau_schedule,
    SchedulerType.COSINE_WITH_MIN_LR: get_cosine_with_min_lr_schedule_with_warmup,
    SchedulerType.WARMUP_STABLE_DECAY: get_wsd_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    scheduler_specific_kwargs: Optional[dict] = None,
):
    """
    Unified API to get any scheduler from its name.

    Args:
        name (`str` or `SchedulerType`):
            The name of the scheduler to use.
        optimizer (`torch.optim.Optimizer`):
            The optimizer that will be used during training.
        num_warmup_steps (`int`, *optional*):
            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_training_steps (`int`, *optional*):
            The number of training steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        scheduler_specific_kwargs (`dict`, *optional*):
            Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler
            parameters will cause the scheduler function to raise a TypeError.
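
    Example (an illustrative sketch; the scheduler name and step counts below are arbitrary placeholders):

    ```python
    import torch

    from transformers.optimization import get_scheduler

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
    scheduler = get_scheduler("cosine", optimizer=optimizer, num_warmup_steps=10, num_training_steps=100)

    for _ in range(100):
        optimizer.step()
        scheduler.step()
    ```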
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]

    # If a `LayerWiseDummyOptimizer` is passed we extract the optimizer dict and
    # recursively call `get_scheduler` to get the proper schedulers on each parameter
    if optimizer is not None and isinstance(optimizer, LayerWiseDummyOptimizer):
        optimizer_dict = optimizer.optimizer_dict
        scheduler_dict = {}

        for param in optimizer_dict.keys():
            scheduler_dict[param] = get_scheduler(
                name,
                optimizer=optimizer_dict[param],
                num_warmup_steps=num_warmup_steps,
                num_training_steps=num_training_steps,
            )

        def scheduler_hook(param):
            # Since the optimizer hook has been already attached we only need to
            # attach the scheduler hook
            scheduler_dict[param].step()

        for param in optimizer_dict.keys():
            if param.requires_grad:
                param.register_post_accumulate_grad_hook(scheduler_hook)

        return LayerWiseDummyScheduler(optimizer_dict=optimizer_dict, lr=optimizer.defaults["lr"])

    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer)

    if scheduler_specific_kwargs is None:
        scheduler_specific_kwargs = {}

    if name == SchedulerType.REDUCE_ON_PLATEAU:
        return schedule_func(optimizer, **scheduler_specific_kwargs)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)

    if name == SchedulerType.INVERSE_SQRT:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps)

    if name == SchedulerType.WARMUP_STABLE_DECAY:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, **scheduler_specific_kwargs)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    return schedule_func(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        **scheduler_specific_kwargs,
    )


class AdamW(Optimizer):
    """
    Implements Adam algorithm with weight decay fix as introduced in [Decoupled Weight Decay
    Regularization](https://arxiv.org/abs/1711.05101).

    Parameters:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*, defaults to 0.001):
            The learning rate to use.
        betas (`Tuple[float,float]`, *optional*, defaults to `(0.9, 0.999)`):
            Adam's betas parameters (b1, b2).
        eps (`float`, *optional*, defaults to 1e-06):
            Adam's epsilon for numerical stability.
        weight_decay (`float`, *optional*, defaults to 0.0):
            Decoupled weight decay to apply.
        correct_bias (`bool`, *optional*, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository they use `False`).
        no_deprecation_warning (`bool`, *optional*, defaults to `False`):
            A flag used to disable the deprecation warning (set to `True` to disable the warning).
    """

    def __init__(
        self,
        params: Iterable[nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
        no_deprecation_warning: bool = False,
    ):
        if not no_deprecation_warning:
            warnings.warn(
                "This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch"
                " implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this"
                " warning",
                FutureWarning,
            )
        require_version("torch>=1.5.0")  # add_ with alpha
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias}
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Arguments:
            closure (`Callable`, *optional*): A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state["step"] = 0
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]

                state["step"] += 1

                # Decay the first and second moment running average coefficients (in-place updates)
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
                denom = exp_avg_sq.sqrt().add_(group["eps"])

                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

                p.addcdiv_(exp_avg, denom, value=-step_size)

                # Just adding the square of the weights to the loss function is *not* the correct way of using
                # L2 regularization/weight decay with Adam, since that will interact with the m and v parameters
                # in strange ways. Instead, decay the weights in a manner that doesn't interact with the m/v
                # parameters, which is equivalent to adding the square of the weights to the loss with plain
                # (non-momentum) SGD. Add weight decay at the end (fixed/"decoupled" version).
                if group["weight_decay"] > 0.0:
                    p.add_(p, alpha=(-group["lr"] * group["weight_decay"]))

        return loss


class Adafactor(Optimizer):
    """
    AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:
    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py

    Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that
    this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
    `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
    `relative_step=False`.

    Arguments:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*):
            The external learning rate.
        eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`):
            Regularization constants for square gradient and parameter scale respectively
        clip_threshold (`float`, *optional*, defaults to 1.0):
            Threshold of root mean square of final gradient update
        decay_rate (`float`, *optional*, defaults to -0.8):
            Coefficient used to compute running averages of square
        beta1 (`float`, *optional*):
            Coefficient used for computing running averages of gradient
        weight_decay (`float`, *optional*, defaults to 0.0):
            Weight decay (L2 penalty)
        scale_parameter (`bool`, *optional*, defaults to `True`):
            If True, learning rate is scaled by root mean square
        relative_step (`bool`, *optional*, defaults to `True`):
            If True, time-dependent learning rate is computed instead of external learning rate
        warmup_init (`bool`, *optional*, defaults to `False`):
            Time-dependent learning rate computation depends on whether warm-up initialization is being used

    This implementation handles low-precision (FP16, bfloat16) values, but we have not thoroughly tested it.

    Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):

        - Training without LR warmup or clip_threshold is not recommended.

           - use scheduled LR warm-up to fixed LR
           - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)
        - Disable relative updates
        - Use scale_parameter=False
        - Additional optimizer operations like gradient clipping should not be used alongside Adafactor

    Example:

    ```python
    Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
    ```

    Others reported the following combination to work well:

    ```python
    Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    ```

    When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`]
    scheduler as following:

    ```python
    from transformers.optimization import Adafactor, AdafactorSchedule

    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    lr_scheduler = AdafactorSchedule(optimizer)
    trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
    ```

    Usage:

    ```python
    # replace AdamW with Adafactor
    optimizer = Adafactor(
        model.parameters(),
        lr=1e-3,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        relative_step=False,
        scale_parameter=False,
        warmup_init=False,
    )
    ```
    """

    def __init__(
        self,
        params,
        lr=None,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        scale_parameter=True,
        relative_step=True,
        warmup_init=False,
    ):
        require_version("torch>=1.5.0")  # add_ with alpha
        if lr is not None and relative_step:
            raise ValueError("Cannot combine manual `lr` and `relative_step=True` options")
        if warmup_init and not relative_step:
            raise ValueError("`warmup_init=True` requires `relative_step=True`")

        defaults = {
            "lr": lr,
            "eps": eps,
            "clip_threshold": clip_threshold,
            "decay_rate": decay_rate,
            "beta1": beta1,
            "weight_decay": weight_decay,
            "scale_parameter": scale_parameter,
            "relative_step": relative_step,
            "warmup_init": warmup_init,
        }
        super().__init__(params, defaults)

    @staticmethod
    def _get_lr(param_group, param_state):
        rel_step_sz = param_group["lr"]
        if param_group["relative_step"]:
            min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2
            rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"]))
        param_scale = 1.0
        if param_group["scale_parameter"]:
            param_scale = max(param_group["eps"][1], param_state["RMS"])
        return param_scale * rel_step_sz

    @staticmethod
    def _get_options(param_group, param_shape):
        factored = len(param_shape) >= 2
        use_first_moment = param_group["beta1"] is not None
        return factored, use_first_moment

    @staticmethod
    def _rms(tensor):
        return tensor.norm(2) / (tensor.numel() ** 0.5)

    @staticmethod
    def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col):
        # adapted from fairseq's adafactor implementation:
        # https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_().unsqueeze(-1)
        c_factor = exp_avg_sq_col.unsqueeze(-2).rsqrt()
        return torch.mul(r_factor, c_factor)

    @torch.no_grad()
    def step(self, closure=None):
        """
        Performs a single optimization step

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.dtype in {torch.float16, torch.bfloat16}:
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError("Adafactor does not support sparse gradients.")

                state = self.state[p]
                grad_shape = grad.shape

                factored, use_first_moment = self._get_options(group, grad_shape)
                # State Initialization
                if len(state) == 0:
                    state["step"] = 0

                    if use_first_moment:
                        # Exponential moving average of gradient values
                        state["exp_avg"] = torch.zeros_like(grad)
                    if factored:
                        state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad)
                        state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad)
                    else:
                        state["exp_avg_sq"] = torch.zeros_like(grad)

                    state["RMS"] = 0
                else:
                    if use_first_moment:
                        state["exp_avg"] = state["exp_avg"].to(grad)
                    if factored:
                        state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad)
                        state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad)
                    else:
                        state["exp_avg_sq"] = state["exp_avg_sq"].to(grad)

                p_data_fp32 = p
                if p.dtype in {torch.float16, torch.bfloat16}:
                    p_data_fp32 = p_data_fp32.float()

                state["step"] += 1
                state["RMS"] = self._rms(p_data_fp32)
                lr = self._get_lr(group, state)

                beta2t = 1.0 - math.pow(state["step"], group["decay_rate"])
                update = (grad**2) + group["eps"][0]
                if factored:
                    exp_avg_sq_row = state["exp_avg_sq_row"]
                    exp_avg_sq_col = state["exp_avg_sq_col"]

                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=-1), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=-2), alpha=(1.0 - beta2t))

                    # Approximation of exponential moving average of square of gradient
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state["exp_avg_sq"]

                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)

                update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0))
                update.mul_(lr)

                if use_first_moment:
                    exp_avg = state["exp_avg"]
                    exp_avg.mul_(group["beta1"]).add_(update, alpha=(1 - group["beta1"]))
                    update = exp_avg

                if group["weight_decay"] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=(-group["weight_decay"] * lr))

                p_data_fp32.add_(-update)

                if p.dtype in {torch.float16, torch.bfloat16}:
                    p.copy_(p_data_fp32)

        return loss


class AdafactorSchedule(LambdaLR):
    """
    Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
    for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.

    It returns `initial_lr` during startup and the actual `lr` during stepping.
    """

    def __init__(self, optimizer, initial_lr=0.0):
        def lr_lambda(_):
            return initial_lr

        for group in optimizer.param_groups:
            group["initial_lr"] = initial_lr
        super().__init__(optimizer, lr_lambda)
        for group in optimizer.param_groups:
            del group["initial_lr"]

    def get_lr(self):
        opt = self.optimizer
        lrs = [
            opt._get_lr(group, opt.state[group["params"][0]])
            for group in opt.param_groups
            if group["params"][0].grad is not None
        ]
        if len(lrs) == 0:
            lrs = self.base_lrs  # if called before stepping
        return lrs


def get_adafactor_schedule(optimizer, initial_lr=0.0):
    """
    Get a proxy schedule for [`~optimization.Adafactor`]

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        initial_lr (`float`, *optional*, defaults to 0.0):
            Initial lr

    Return:
        [`~optimization.Adafactor`] proxy schedule object.


    """
    return AdafactorSchedule(optimizer, initial_lr)