
"""
Processor class for Bark
"""
import json
import os
from typing import Optional

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer


logger = logging.get_logger(__name__)


class BarkProcessor(ProcessorMixin):
    r"""
    Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets into a single processor.

    Args:
        tokenizer ([`PreTrainedTokenizer`]):
            An instance of [`PreTrainedTokenizer`].
        speaker_embeddings (`Dict[Dict[str]]`, *optional*):
            Optional nested speaker embeddings dictionary. The first level contains voice preset names (e.g.
            `"en_speaker_4"`). The second level contains `"semantic_prompt"`, `"coarse_prompt"` and `"fine_prompt"`
            embeddings. The values correspond to the path of the corresponding `np.ndarray`. See
            [here](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c) for
            a list of `voice_preset_names`.
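
    Example:

        An illustrative sketch rather than an exhaustive reference; it assumes the `suno/bark-small` checkpoint and
        its bundled voice presets (e.g. `"v2/en_speaker_6"`) are available on the Hugging Face Hub.

        >>> from transformers import BarkProcessor

        >>> # load the tokenizer and the speaker-embedding paths shipped with the checkpoint
        >>> processor = BarkProcessor.from_pretrained("suno/bark-small")

        >>> # tokenize the text and attach the chosen voice preset as `history_prompt`
        >>> inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")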

    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)

        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        r"""
        Instantiate a Bark processor associated with a pretrained model.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained [`BarkProcessor`] hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a processor saved using the [`~BarkProcessor.save_pretrained`]
                  method, e.g., `./my_model_directory/`.
            speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
                The name of the `.json` file containing the `speaker_embeddings` dictionary located in
                `pretrained_model_name_or_path`. If `None`, no speaker embeddings are loaded.
            **kwargs
                Additional keyword arguments passed along to
                [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`].
        """
        if speaker_embeddings_dict_path is not None:
            # fetch the JSON file that maps preset names to the relative paths of their prompt arrays
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f"`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist, "
                    "no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json "
                    "dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None

        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)

        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        """
        Saves the attributes of this processor (tokenizer...) in the specified directory so that it can be reloaded
        using the [`~BarkProcessor.from_pretrained`] method.

        Args:
            save_directory (`str` or `os.PathLike`):
                Directory where the tokenizer files and the speaker embeddings will be saved (directory will be created
                if it does not exist).
            speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
                The name of the `.json` file that will contain the `speaker_embeddings` nested path dictionary, if it
                exists, and that will be located in `pretrained_model_name_or_path/speaker_embeddings_directory`.
            speaker_embeddings_directory (`str`, *optional*, defaults to `"speaker_embeddings/"`):
                The name of the folder in which the speaker_embeddings arrays will be saved.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs:
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
        """
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)

            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory

            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)

                    # save each prompt array under `speaker_embeddings_directory` and record its relative path
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")

                    embeddings_dict[prompt_key] = tmp_dict

            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)

        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]

        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]."
                )

            # resolve the array either from the Hub repo or from a local directory
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f"`{os.path.join(self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key])}` does not exist, "
                    f"no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} "
                    "embeddings."
                )

            voice_preset_dict[key] = np.load(path)

        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f"Voice preset unrecognized, missing {key} as a key.")

            if not isinstance(voice_preset[key], np.ndarray):
                raise TypeError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.")

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """
        Main method to prepare for the model one or several sequence(s). This method forwards the `text` and `kwargs`
        arguments to the AutoTokenizer's [`~AutoTokenizer.__call__`] to encode the text. The method also proposes a
        voice preset which is a dictionary of arrays that conditions `Bark`'s output. `kwargs` arguments are forwarded
        to the tokenizer and to `cached_file` method if `voice_preset` is a valid filename.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            voice_preset (`str`, `Dict[np.ndarray]`):
                The voice preset, i.e. the speaker embeddings. It can either be a valid voice preset name, e.g.
                `"en_speaker_1"`, or directly a dictionary of `np.ndarray` embeddings for each submodel of `Bark`, or
                a valid file name of a local `.npz` single voice preset.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.

        Returns:
            Tuple([`BatchEncoding`], [`BatchFeature`]): A tuple composed of a [`BatchEncoding`], i.e. the output of the
            `tokenizer`, and a [`BatchFeature`], i.e. the voice preset with the right tensors type.
        """
        if voice_preset is not None and not isinstance(voice_preset, dict):
            # a known preset name resolves to the preloaded speaker embeddings, otherwise treat it as an `.npz` file
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)

            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"

                voice_preset = np.load(voice_preset)

        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)

        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )

        # the voice preset conditions Bark's generation as its "history prompt"
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset

        return encoded_text
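

# Illustrative usage sketch (comments only; nothing here runs on import). It assumes the `suno/bark-small`
# checkpoint is reachable on the Hub and that the chosen output directory is writable; adapt the checkpoint
# name, the voice preset and the paths to your setup:
#
#     from transformers import BarkProcessor
#
#     processor = BarkProcessor.from_pretrained("suno/bark-small")
#     inputs = processor("Hey, how are you doing?", voice_preset="v2/en_speaker_6")
#
#     # persist the tokenizer and the speaker-embedding arrays, then reload from the local directory
#     processor.save_pretrained("./bark_processor")
#     reloaded = BarkProcessor.from_pretrained("./bark_processor")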