"""PatchTST model configuration"""

from typing import List, Optional, Union

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class PatchTSTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`PatchTSTModel`]. It is used to instantiate a
    PatchTST model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the
    [ibm/patchtst](https://huggingface.co/ibm/patchtst) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_input_channels (`int`, *optional*, defaults to 1):
            The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
            multivariate targets.
        context_length (`int`, *optional*, defaults to 32):
            The context length of the input sequence.
        distribution_output (`str`, *optional*, defaults to `"student_t"`):
            The distribution emission head for the model when loss is "nll". Could be either "student_t", "normal" or
            "negative_binomial".
        loss (`str`, *optional*, defaults to `"mse"`):
            The loss function for the model corresponding to the `distribution_output` head. For parametric
            distributions it is the negative log likelihood ("nll") and for point estimates it is the mean squared
            error "mse".
        patch_length (`int`, *optional*, defaults to 1):
            Define the patch length of the patchification process.
        patch_stride (`int`, *optional*, defaults to 1):
            Define the stride of the patchification process.
        num_hidden_layers (`int`, *optional*, defaults to 3):
            Number of hidden layers.
        d_model (`int`, *optional*, defaults to 128):
            Dimensionality of the transformer layers.
        num_attention_heads (`int`, *optional*, defaults to 4):
            Number of attention heads for each attention layer in the Transformer encoder.
        share_embedding (`bool`, *optional*, defaults to `True`):
            Sharing the input embedding across all channels.
        channel_attention (`bool`, *optional*, defaults to `False`):
            Activate the channel attention block in the Transformer to allow channels to attend to each other.
        ffn_dim (`int`, *optional*, defaults to 512):
            Dimension of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        norm_type (`str` , *optional*, defaults to `"batchnorm"`):
            Normalization at each Transformer layer. Can be `"batchnorm"` or `"layernorm"`.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            A value added to the denominator for numerical stability of normalization.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the attention probabilities.
        positional_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability in the positional embedding layer.
        path_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the residual path of each Transformer block.
        ff_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability used between the two layers of the feed-forward networks.
        bias (`bool`, *optional*, defaults to `True`):
            Whether to add bias in the feed-forward networks.
        activation_function (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (string) in the Transformer. `"gelu"` and `"relu"` are supported.
        pre_norm (`bool`, *optional*, defaults to `True`):
            Normalization is applied before self-attention if pre_norm is set to `True`. Otherwise, normalization is
            applied after the residual block.
        positional_encoding_type (`str`, *optional*, defaults to `"sincos"`):
            Positional encodings. Options `"random"` and `"sincos"` are supported.
        use_cls_token (`bool`, *optional*, defaults to `False`):
            Whether a cls token is used.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated normal weight initialization distribution.
        share_projection (`bool`, *optional*, defaults to `True`):
            Sharing the projection layer across different channels in the forecast head.
        scaling (`str` or `bool`, *optional*, defaults to `"std"`):
            Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
            scaler is set to "mean".
        do_mask_input (`bool`, *optional*):
            Whether to apply masking during pretraining.
        mask_type (`str`, *optional*, defaults to `"random"`):
            Masking type. Only `"random"` and `"forecast"` are currently supported.
        random_mask_ratio (`float`, *optional*, defaults to 0.5):
            Masking ratio applied to mask the input data during random pretraining.
        num_forecast_mask_patches (`int` or `list`, *optional*, defaults to `[2]`):
            Number of patches to be masked at the end of each batch sample. If it is an integer,
            all the samples in the batch will have the same number of masked patches. If it is a list,
            samples in the batch will be randomly masked by numbers defined in the list. This argument is only used
            for forecast pretraining.
        channel_consistent_masking (`bool`, *optional*, defaults to `False`):
            If channel consistent masking is True, all the channels will have the same masking pattern.
        unmasked_channel_indices (`list`, *optional*):
            Indices of channels that are not masked during pretraining. Values in the list are numbers between 1 and
            `num_input_channels`.
        mask_value (`int`, *optional*, defaults to 0):
            Values in the masked patches will be filled by `mask_value`.
        pooling_type (`str`, *optional*, defaults to `"mean"`):
            Pooling of the embedding. `"mean"`, `"max"` and `None` are supported.
        head_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for the head.
        prediction_length (`int`, *optional*, defaults to 24):
            The prediction horizon that the model will output.
        num_targets (`int`, *optional*, defaults to 1):
            Number of targets for regression and classification tasks. For classification, it is the number of
            classes.
        output_range (`list`, *optional*):
            Output range for regression task. The range of output values can be set to enforce the model to produce
            values within a range.
        num_parallel_samples (`int`, *optional*, defaults to 100):
            The number of samples generated in parallel for probabilistic prediction.
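
    The number of patches the encoder sees follows from `context_length`, `patch_length` and `patch_stride`.
    A minimal sketch of that arithmetic, assuming the usual sliding-window patchification (the values below are
    illustrative, not defaults):

    ```python
    >>> context_length, patch_length, patch_stride = 32, 8, 8  # illustrative values
    >>> (max(context_length, patch_length) - patch_length) // patch_stride + 1  # patches per channel
    4
    ```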

    Example:

    ```python
    >>> from transformers import PatchTSTConfig, PatchTSTModel

    >>> # Initializing a PatchTST configuration with 12 time steps for prediction
    >>> configuration = PatchTSTConfig(prediction_length=12)

    >>> # Randomly initializing a model (with random weights) from the configuration
    >>> model = PatchTSTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
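
    >>> # A hypothetical forecasting setup; parameter values here are illustrative, not recommendations
    >>> forecast_configuration = PatchTSTConfig(
    ...     num_input_channels=7,
    ...     context_length=512,
    ...     patch_length=16,
    ...     patch_stride=16,
    ...     prediction_length=96,
    ... )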
    ```
    """

    model_type = "patchtst"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_attention_heads",
        "num_hidden_layers": "num_hidden_layers",
    }

    def __init__(
        self,
        # time series specific configuration
        num_input_channels: int = 1,
        context_length: int = 32,
        distribution_output: str = "student_t",
        loss: str = "mse",
        # PatchTST arguments
        patch_length: int = 1,
        patch_stride: int = 1,
        # Transformer architecture configuration
        num_hidden_layers: int = 3,
        d_model: int = 128,
        num_attention_heads: int = 4,
        share_embedding: bool = True,
        channel_attention: bool = False,
        ffn_dim: int = 512,
        norm_type: str = "batchnorm",
        norm_eps: float = 1e-05,
        attention_dropout: float = 0.0,
        positional_dropout: float = 0.0,
        path_dropout: float = 0.0,
        ff_dropout: float = 0.0,
        bias: bool = True,
        activation_function: str = "gelu",
        pre_norm: bool = True,
        positional_encoding_type: str = "sincos",
        use_cls_token: bool = False,
        init_std: float = 0.02,
        share_projection: bool = True,
        scaling: Optional[Union[str, bool]] = "std",
        # mask pretraining
        do_mask_input: Optional[bool] = None,
        mask_type: str = "random",
        random_mask_ratio: float = 0.5,
        num_forecast_mask_patches: Optional[Union[List[int], int]] = [2],
        channel_consistent_masking: Optional[bool] = False,
        unmasked_channel_indices: Optional[List[int]] = None,
        mask_value: int = 0,
        # head
        pooling_type: str = "mean",
        head_dropout: float = 0.0,
        prediction_length: int = 24,
        num_targets: int = 1,
        output_range: Optional[List] = None,
        # distribution head
        num_parallel_samples: int = 100,
        **kwargs,
    ):
        # time series specific configuration
        self.context_length = context_length
        self.num_input_channels = num_input_channels  # n_vars
        self.loss = loss
        self.distribution_output = distribution_output
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.d_model = d_model
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.num_hidden_layers = num_hidden_layers
        self.attention_dropout = attention_dropout
        self.share_embedding = share_embedding
        self.channel_attention = channel_attention
        self.norm_type = norm_type
        self.norm_eps = norm_eps
        self.positional_dropout = positional_dropout
        self.path_dropout = path_dropout
        self.ff_dropout = ff_dropout
        self.bias = bias
        self.activation_function = activation_function
        self.pre_norm = pre_norm
        self.positional_encoding_type = positional_encoding_type
        self.use_cls_token = use_cls_token
        self.init_std = init_std
        self.scaling = scaling

        # PatchTST parameters
        self.patch_length = patch_length
        self.patch_stride = patch_stride

        # mask pretraining
        self.do_mask_input = do_mask_input
        self.mask_type = mask_type
        self.random_mask_ratio = random_mask_ratio  # for random masking
        self.num_forecast_mask_patches = num_forecast_mask_patches  # for forecast masking
        self.channel_consistent_masking = channel_consistent_masking
        self.unmasked_channel_indices = unmasked_channel_indices
        self.mask_value = mask_value

        # general head parameters
        self.pooling_type = pooling_type
        self.head_dropout = head_dropout

        # prediction head parameters
        self.share_projection = share_projection
        self.prediction_length = prediction_length

        # regression and classification head parameters
        self.num_targets = num_targets
        self.output_range = output_range

        super().__init__(**kwargs)