
"""WavLM model configuration"""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class WavLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`WavLMModel`]. It is used to instantiate an WavLM
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the WavLM
    [microsoft/wavlm-base](https://huggingface.co/microsoft/wavlm-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 32):
            Vocabulary size of the WavLM model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`WavLMModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        activation_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for activations inside the fully connected layer.
        attention_dropout (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        final_dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for the final projection layer of [`WavLMForCTC`].
        layerdrop (`float`, *optional*, defaults to 0.1):
            The LayerDrop probability. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for more
            details.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        feat_extract_norm (`str`, *optional*, defaults to `"group"`):
            The norm to be applied to 1D convolutional layers in feature encoder. One of `"group"` for group
            normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
            convolutional layers.
        feat_proj_dropout (`float`, *optional*, defaults to 0.0):
            The dropout probability for output of the feature encoder.
        feat_extract_activation (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the 1D convolutional layers of the feature
            extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
        conv_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
            A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
            feature encoder. The length of *conv_dim* defines the number of 1D convolutional layers.
        conv_stride (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
            A tuple of integers defining the stride of each 1D convolutional layer in the feature encoder. The length
            of *conv_stride* defines the number of convolutional layers and has to match the length of *conv_dim*.
        conv_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the feature encoder. The
            length of *conv_kernel* defines the number of convolutional layers and has to match the length of
            *conv_dim*.
        conv_bias (`bool`, *optional*, defaults to `False`):
            Whether the 1D convolutional layers have a bias.
        num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
            Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
            embeddings layer.
        num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
            Number of groups of 1D convolutional positional embeddings layer.
        do_stable_layer_norm (`bool`, *optional*, defaults to `False`):
            Whether to apply *stable* layer norm architecture of the Transformer encoder. `do_stable_layer_norm is
            True` corresponds to applying layer norm before the attention layer, whereas `do_stable_layer_norm is
            False` corresponds to applying layer norm after the attention layer.
        apply_spec_augment (`bool`, *optional*, defaults to `True`):
            Whether to apply *SpecAugment* data augmentation to the outputs of the feature encoder. For reference see
            [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
            Recognition](https://arxiv.org/abs/1904.08779).
        mask_time_prob (`float`, *optional*, defaults to 0.05):
            Probability for each feature vector along the time axis to be chosen as the start of the vector span to be
            masked. Approximately `mask_time_prob * sequence_length // mask_time_length` spans of `mask_time_length`
            feature vectors will be masked along the time axis (see the worked example below the argument list). This
            is only relevant if `apply_spec_augment is True`.
        mask_time_length (`int`, *optional*, defaults to 10):
            Length of vector span along the time axis.
        mask_time_min_masks (`int`, *optional*, defaults to 2):
            The minimum number of masks of length `mask_time_length` generated along the time axis, each time step,
            irrespective of `mask_time_prob`. Only relevant if `mask_time_prob * len(time_axis) / mask_time_length <
            mask_time_min_masks`.
        mask_feature_prob (`float`, *optional*, defaults to 0.0):
            Probability for each feature vector along the feature axis to be chosen as the start of the vector span to
            be masked. Approximately `mask_feature_prob * hidden_size // mask_feature_length` spans of
            `mask_feature_length` feature vectors will be masked along the feature axis. This is only relevant if
            `apply_spec_augment is True`.
        mask_feature_length (`int`, *optional*, defaults to 10):
            Length of vector span along the feature axis.
        num_codevectors_per_group (`int`, *optional*, defaults to 320):
            Number of entries in each quantization codebook (group).
        num_codevector_groups (`int`, *optional*, defaults to 2):
            Number of codevector groups for product codevector quantization.
        contrastive_logits_temperature (`float`, *optional*, defaults to 0.1):
            The temperature *kappa* in the contrastive loss.
        num_negatives (`int`, *optional*, defaults to 100):
            Number of negative samples for the contrastive loss.
        codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the quantized feature vectors.
        proj_codevector_dim (`int`, *optional*, defaults to 256):
            Dimensionality of the final projection of both the quantized and the transformer features.
        diversity_loss_weight (`float`, *optional*, defaults to 0.1):
            The weight of the codebook diversity loss component.
        ctc_loss_reduction (`str`, *optional*, defaults to `"mean"`):
            Specifies the reduction to apply to the output of `torch.nn.CTCLoss`. Only relevant when training an
            instance of [`WavLMForCTC`].
        ctc_zero_infinity (`bool`, *optional*, defaults to `False`):
            Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`. Infinite losses mainly
            occur when the inputs are too short to be aligned to the targets. Only relevant when training an instance
            of [`WavLMForCTC`].
        use_weighted_layer_sum (`bool`, *optional*, defaults to `False`):
            Whether to use a weighted average of layer outputs with learned weights. Only relevant when using an
            instance of [`WavLMForSequenceClassification`].
        classifier_proj_size (`int`, *optional*, defaults to 256):
            Dimensionality of the projection before token mean-pooling for classification.
        tdnn_dim (`Tuple[int]` or `List[int]`, *optional*, defaults to `(512, 512, 512, 512, 1500)`):
            A tuple of integers defining the number of output channels of each 1D convolutional layer in the *TDNN*
            module of the *XVector* model. The length of *tdnn_dim* defines the number of *TDNN* layers.
        tdnn_kernel (`Tuple[int]` or `List[int]`, *optional*, defaults to `(5, 3, 3, 1, 1)`):
            A tuple of integers defining the kernel size of each 1D convolutional layer in the *TDNN* module of the
            *XVector* model. The length of *tdnn_kernel* has to match the length of *tdnn_dim*.
        tdnn_dilation (`Tuple[int]` or `List[int]`, *optional*, defaults to `(1, 2, 3, 1, 1)`):
            A tuple of integers defining the dilation factor of each 1D convolutional layer in *TDNN* module of the
            *XVector* model. The length of *tdnn_dilation* has to match the length of *tdnn_dim*.
        xvector_output_dim (`int`, *optional*, defaults to 512):
            Dimensionality of the *XVector* embedding vectors.
        add_adapter (`bool`, *optional*, defaults to `False`):
            Whether a convolutional network should be stacked on top of the Wav2Vec2 Encoder. Can be very useful for
            warm-starting Wav2Vec2 for SpeechEncoderDecoder models.
        adapter_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        adapter_stride (`int`, *optional*, defaults to 2):
            Stride of the convolutional layers in the adapter network. Only relevant if `add_adapter is True`.
        num_adapter_layers (`int`, *optional*, defaults to 3):
            Number of convolutional layers that should be used in the adapter network. Only relevant if `add_adapter is
            True`.
        output_hidden_size (`int`, *optional*):
            Dimensionality of the encoder output layer. If not defined, this defaults to `hidden_size`. Only relevant
            if `add_adapter is True`.

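    As an illustration of the SpecAugment settings above (a worked example added here, not an exhaustive
    description of the masking algorithm): with the defaults `mask_time_prob=0.05` and `mask_time_length=10`,
    a sequence of 1000 extracted feature vectors yields approximately `0.05 * 1000 // 10 = 5` masked spans of
    10 consecutive time steps each (roughly 50 masked vectors in total), and never fewer than
    `mask_time_min_masks` spans.
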
    Example:

    ```python
    >>> from transformers import WavLMConfig, WavLMModel

    >>> # Initializing a WavLM microsoft/wavlm-base style configuration
    >>> configuration = WavLMConfig()

    >>> # Initializing a model (with random weights) from the microsoft/wavlm-base style configuration
    >>> model = WavLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
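
    >>> # Illustrative additions (a sketch, not from the original example): the defaults can be
    >>> # overridden at construction time, e.g. with values in the spirit of a larger WavLM variant
    >>> custom_configuration = WavLMConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)
    >>> custom_configuration.hidden_size
    1024

    >>> # The feature encoder downsamples the raw waveform by the product of `conv_stride`
    >>> configuration.inputs_to_logits_ratio
    320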
    ```"""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 3, 3),
        conv_bias=False,
        num_buckets=320,
        max_bucket_distance=800,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
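

# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition, not part of the original module), assuming
# the class is imported as `from transformers import WavLMConfig`:
#
#     config = WavLMConfig()
#     # product of the default `conv_stride` values: 5 * 2 * 2 * 2 * 2 * 2 * 2 == 320
#     assert config.inputs_to_logits_ratio == 320
#
#     # mismatched feature-extractor settings are rejected at construction time:
#     WavLMConfig(conv_dim=(512, 512), conv_stride=(5, 2, 2), conv_kernel=(10, 3, 3))
#     # -> ValueError: Configuration for convolutional layers is incorrect. ...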