
"""PyTorch Musicgen model."""

import copy
import inspect
import math
import random
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from ...activations import ACT2FN
from ...generation import (
    ClassifierFreeGuidanceLogitsProcessor,
    GenerationConfig,
    GenerationMixin,
    GenerationMode,
    LogitsProcessorList,
    StoppingCriteriaList,
)
from ...modeling_attn_mask_utils import (
    _prepare_4d_attention_mask,
    _prepare_4d_attention_mask_for_sdpa,
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    ModelOutput,
    Seq2SeqLMOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_2_available,
    is_flash_attn_greater_or_equal_2_10,
    logging,
    replace_return_docstrings,
)
from ..auto.configuration_auto import AutoConfig
from ..auto.modeling_auto import AutoModel
from .configuration_musicgen import MusicgenConfig, MusicgenDecoderConfig


if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward

if TYPE_CHECKING:
    from ...generation.streamers import BaseStreamer

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "MusicgenConfig"
_CHECKPOINT_FOR_DOC = "facebook/musicgen-small"


@dataclass
class MusicgenUnconditionalInput(ModelOutput):
    """
    Args:
        encoder_outputs (`Tuple[torch.FloatTensor]` of length 1, with tensor shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the text encoder model.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Encoder attention mask to avoid performing attention on padding token indices. Mask values selected in `[0,
            1]`: 1 for tokens that are **not masked**, 0 for tokens that are **masked**.
        guidance_scale (`float`, *optional*):
            Guidance scale for classifier free guidance, setting the balance between the conditional logits (predicted
            from the prompts) and the unconditional logits (predicted without prompts).
    """

    encoder_outputs: Tuple[torch.FloatTensor] = None
    attention_mask: torch.LongTensor = None
    guidance_scale: float = None


def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
    """
    Shift input ids one token to the right.
    """
    # transpose to get (bsz, num_codebooks, seq_len)
    input_ids = input_ids.transpose(1, 2)
    shifted_input_ids = input_ids.new_zeros(input_ids.shape)
    shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
    if decoder_start_token_id is None:
        raise ValueError("Make sure to set the decoder_start_token_id attribute of the model's configuration.")
    shifted_input_ids[..., 0] = decoder_start_token_id

    if pad_token_id is None:
        raise ValueError("Make sure to set the pad_token_id attribute of the model's configuration.")
    # replace possible -100 values in the labels by `pad_token_id`
    shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)

    return shifted_input_ids


class MusicgenSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length."""

    def __init__(self, num_positions: int, embedding_dim: int):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.make_weights(num_positions, embedding_dim)

    def make_weights(self, num_embeddings: int, embedding_dim: int):
        emb_weights = self.get_embedding(num_embeddings, embedding_dim)
        if hasattr(self, "weights"):
            # in forward, put the weights on the dtype and device of the existing parameter
            emb_weights = emb_weights.to(dtype=self.weights.dtype, device=self.weights.device)

        self.weights = nn.Parameter(emb_weights)
        self.weights.requires_grad = False
        self.weights.detach_()

    @staticmethod
    def get_embedding(num_embeddings: int, embedding_dim: int):
        """
        Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the
        description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
        # note: the cosine block comes first, then the sine block (concatenated, not interleaved)
        emb = torch.cat([torch.cos(emb), torch.sin(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad the odd dimension
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        return emb.to(torch.get_default_dtype())

    @torch.no_grad()
    def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
        bsz, codebooks, seq_len = input_ids.size()
        # create the position ids from the input token ids
        position_ids = (torch.arange(seq_len) + past_key_values_length).to(input_ids.device)
        # expand embeddings if needed
        if seq_len > self.weights.size(0):
            self.make_weights(seq_len + past_key_values_length, self.embedding_dim)
        return self.weights.index_select(0, position_ids.view(-1)).detach()
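
# --- Illustrative usage sketch (added for clarity; not part of the upstream file). ---
# The shapes and token ids below are assumptions chosen for the example: MusicGen labels
# arrive as (batch_size, seq_len, num_codebooks) and are shifted into decoder input ids of
# shape (batch_size, num_codebooks, seq_len):
#
#     labels = torch.full((2, 8, 4), 42, dtype=torch.long)  # (bsz, seq_len, num_codebooks)
#     decoder_input_ids = shift_tokens_right(labels, pad_token_id=2048, decoder_start_token_id=2048)
#     assert decoder_input_ids.shape == (2, 4, 8)           # transposed, then shifted right by one
#     assert (decoder_input_ids[..., 0] == 2048).all()      # position 0 holds the start token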
ededee         f fdZ	de
j        dedefdZ	 	 	 	 	 dde
j        dee
j                 deee
j                          dee
j                 dee
j                 dedee
j        ee
j                 eee
j                          f         fdZ xZS )MusicgenAttentionz=Multi-headed attention from 'Attention Is All You Need' paper        FTN	embed_dim	num_headsdropout
is_decoderbias	is_causalconfigc                 
   t                                                       || _        || _        || _        ||z  | _        || _        | j        |z  | j        k    rt          d| j         d| d          | j        dz  | _        || _	        || _
        t          j        |||          | _        t          j        |||          | _        t          j        |||          | _        t          j        |||          | _        d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      ࿩r   )rS   rT   r   r   r   head_dimr   rI   scalingr   r   ra   Lineark_projv_projq_projout_proj)	rV   r   r   r   r   r   r   r   rW   s	           r=   rT   zMusicgenAttention.__init__   s    	""!Y.MI%$.883dn 3 3%.3 3 3   }d*$"i	94@@@i	94@@@i	94@@@	)YTBBBr<   tensorr   r~   c                     |                     ||| j        | j                                      dd                                          S )Nr(   r$   )rs   r   r   rE   
contiguousrV   r   r   r~   s       r=   _shapezMusicgenAttention._shape   s<    {{3GGQQRSUVWWbbdddr<   hidden_stateskey_value_statespast_key_valuer0   layer_head_maskoutput_attentionsreturnc                 Z
   |du}|                                 \  }}	}
|                     |          | j        z  }|r6|4|d         j        d         |j        d         k    r|d         }|d         }n>|rU|                     |                     |          d|          }|                     |                     |          d|          }n||                     |                     |          d|          }|                     |                     |          d|          }t          j        |d         |gd          }t          j        |d         |gd          }nT|                     |                     |          d|          }|                     |                     |          d|          }| j	        r||f}|| j
        z  d| j        f} |                     ||	|          j        | } |j        | } |j        | }|                     d          }t          j        ||                    dd                    }|                                 || j
        z  |	|fk    r2t!          d|| j
        z  |	|f d|                                            ||                                 |d|	|fk    r+t!          d	|d|	|f d|                                            |                    || j
        |	|          |z   }|                    || j
        z  |	|          }t"          j                            |d          }||                                 | j
        fk    r-t!          d
| j
        f d|                                            |                    dddd          |                    || j
        |	|          z  }|                    || j
        z  |	|          }|r=|                    || j
        |	|          }|                    || j
        z  |	|          }nd}t"          j                            || j        | j                  }t          j        ||          }|                                 || j
        z  |	| j        fk    r7t!          d|| j
        z  |	| j        f d|                                            |                    || j
        |	| j                  }|                    dd          }|                    ||	| j                  }|                     |          }|||fS )#Input shape: Batch x Time x ChannelNr   r$   r(   rB   rh   z$Attention weights should be of size 	, but is z!Attention mask should be of size z/Head mask for a single layer should be of size ptraining `attn_output` should be of size )rz   r   r   rG   r   r   r   r6   rp   r   r   r   rs   reshapebmmrE   rI   ra   
functionalsoftmaxr   r   r   r   )rV   r   r   r   r0   r   r   is_cross_attentionr~   tgt_len_query_states
key_statesvalue_states
proj_shapesrc_lenattn_weightsattn_weights_reshaped
attn_probsattn_outputs                       r=   r   zMusicgenAttention.forward   s    .T9',,..Wa {{=11DL@ 	L*q!'*.>.DQ.GGG (*J)!,LL 	LT[[1A%B%BBLLJ;;t{{3C'D'Db#NNLL'T[[%?%?SIIJ;;t{{='A'A2sKKLN1$5z#BJJJJ 9nQ&7%FANNNLL T[[%?%?SIIJ;;t{{='A'A2sKKL? 	8 ),7NDN*B>
Ct{{<#>>CZP'Z'4
+|+Z8//!$$yz/C/CAq/I/IJJ3#7'"JJJ*dn8LgW^7_ * * %%''* *  
 %""$$a'(BBB ta'8Rtt]k]p]p]r]rtt   (,,S$.'7SSVddL',,S4>-A7GTTL},,\r,BB&##%%$.)::: 1t~FW 1 1',,..1 1   +//2q!<<|?P?PQTVZVdfmov?w?wwL',,S4>-A7GTTL 	)
 %1$5$5c4>7T[$\$\!055cDN6JGU\]]LL$(!]**<4<RVR_*``
i
L99#"6!OOO)C$.4H'SWS`3a ) )$$&&) )  
 "&&sDNGT]SS!++Aq11 "))#wGGmmK001>AAr<   )r   FTFNNNNNF)r2   r3   r4   r5   r   r:   boolr   r)   rT   r6   r   r   r	   r   r   r   s   @r=   r   r      s       GG  +/C CC C 	C
 C C C (C C C C C C>eU\ eC ec e e e e 488<1526"'vB vB|vB #5<0vB !u|!45	vB
 !.vB "%,/vB  vB 
u|Xel3XeEL>Q5RR	SvB vB vB vB vB vB vB vBr<   r   c                   2    e Zd ZdZ fdZdej        dedefdZ	 	 	 	 	 dd	ej        d
e	ej                 de	e
ej                          de	ej                 de	ej                 dede
ej        e	ej                 e	e
ej                          f         fdZ xZS )MusicgenFlashAttention2aL  
    Musicgen flash attention module. This module inherits from `MusicgenAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    c                 b     t                      j        |i | t                       | _        d S rR   )rS   rT   r!   _flash_attn_uses_top_left_mask)rV   argskwargsrW   s      r=   rT   z MusicgenFlashAttention2.__init__H  s9    $)&)))
 3V2W2W.W+++r<   r   r   r~   c                 F    |                     ||| j        | j                  S rR   )rs   r   r   r   s       r=   _reshapez MusicgenFlashAttention2._reshapeP  s    {{3GGGr<   NFr   r   r   r0   r   r   r   c           
      R   |rt          d          |d u}|                                \  }}	}
|                     |                     |          d|          }|r^|\|d         j        d         |j        d         k    r:|d                             dd          }|d                             dd          }ng|rV|                     |                     |          d|          }|                     |                     |          d|          }n||                     |                     |          d|          }|                     |                     |          d|          }t          j	        |d                             dd          |gd          }t          j	        |d                             dd          |gd          }nT|                     |                     |          d|          }|                     |                     |          d|          }| j
        r,|                    dd          |                    dd          f}|j        d         }|||d         j        d         z  }|j        }|t          j        k    rt          j                    rt          j                    }n3t          | j        d          r| j        j        }n| j        j        j        }t&                              d	| d
           |                    |          }|                    |          }|                    |          }t-          |||||	| j        r| j        nd| j        | j                  }|                    ||	d          }|                     |          }|sd }|||fS )NzDMusicgenFlashAttention2 attention does not support output_attentionsrB   r   r$   r(   rh   _pre_quantization_dtypezThe input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in .r   )r   r   use_top_left_mask)rI   rz   r   r   rG   rE   r   r   r6   rp   r   r\   float32is_autocast_enabledget_autocast_gpu_dtyper_   r   r   weightloggerwarning_oncer`   r+   r   r   r   r   r   r   )rV   r   r   r   r0   r   r   r   r~   q_lenr   r   r   r   
kv_seq_leninput_dtypetarget_dtyper   r   s                      r=   r   zMusicgenFlashAttention2.forwardS  s     	ecddd .T9%**,,UA }}T[[%?%?SII 	N*q!'*.>.DQ.GGG (*44Q::J)!,66q!<<LL 	Nt{{3C'D'Db#NNJ==5E)F)FCPPLL't{{='A'A2sKKJ==])C)CRMMLN1$5$?$?1$E$Ez#RXYZZZJ 9nQ&7&A&A!Q&G&G%V\]^^^LL t{{='A'A2sKKJ==])C)CRMML? 	X )221a88,:P:PQRTU:V:VWN%b)
%.+1"55J #(%-''(** 8$;==&?@@ 8#{B#{17$ $ $ $   (??<88L#|44J'??<88L.$(M:DLLsn"A	
 	
 	
 "))#ub99mmK00  	 LL.88r<   r   )r2   r3   r4   r5   rT   r6   r   r   r   r   r	   r   r   r   r   s   @r=   r   r   @  s;        X X X X XHu| Hc H H H H H 488<1526"'i9 i9|i9 #5<0i9 !u|!45	i9
 !.i9 "%,/i9  i9 
u|Xel3XeEL>Q5RR	Si9 i9 i9 i9 i9 i9 i9 i9r<   r   c                   
    e Zd Z	 	 	 	 	 ddej        deej                 deeej                          deej                 deej                 ded	eej        eej                 eeej                          f         f fd
Z xZ	S )MusicgenSdpaAttentionNFr   r   r   r0   r   r   r   c                    |s|At                               d           t                                          ||||||          S ||                    g d          t          j        |j                  j        k    	                                rAt                               d           t                                          ||||||          S |du}|
                                \  }}	}
|                     |          }|r6|4|d         j        d         |j        d	         k    r|d         }|d	         }n>|rU|                     |                     |          d
|          }|                     |                     |          d
|          }n||                     |                     |          d
|          }|                     |                     |          d
|          }t          j        |d         |gd          }t          j        |d	         |gd          }nT|                     |                     |          d
|          }|                     |                     |          d
|          }| j        r||f}|                     ||	|          }| j        r
||	d	k    rdnd}t
          j        j                            ||||| j        r| j        nd|          }|
                                || j        |	| j        fk    r5t5          d|| j        |	| j        f d|
                                           |                    d	d          }|                    ||	| j                  }|                     |          }|d|fS )r   Na  MusicgenModel is using MusicgenSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.)r   r   r0   r   r   )r(   r$   r   rh   a  `torch.nn.functional.scaled_dot_product_attention` does not support having an empty attention mask. Falling back to the manual attention implementation. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.Note that this probably happens because `guidance_scale>1` or because you used `get_unconditional_inputs`. See https://github.com/huggingface/transformers/issues/31189 for more information.r   r$   r(   rB   TFr   )	attn_mask	dropout_pr   r   r   )r   r   rS   r   meanr6   finfor\   minanyrz   r   rG   r   r   r   rp   r   r   ra   r   scaled_dot_product_attentionr   r   r   r   rI   rE   r   r   r   )rV   r   r   r   r0   r   r   r   r~   r   r   r   r   r   r   r   rW   s                   r=   r   zMusicgenSdpaAttention.forward  s     	 ;l   77??!1-- /"3 #    &$$$33u{>CW7X7X7\\aacc ' P   77??!1-- /"3 #    .T9',,..Wa {{=11 	L*q!'*.>.DQ.GGG (*J)!,LL 	LT[[1A%B%BBLLJ;;t{{3C'D'Db#NNLL'T[[%?%?SIIJ;;t{{='A'A2sKKLN1$5z#BJJJJ 9nQ&7%FANNNLL T[[%?%?SIIJ;;t{{='A'A2sKKL? 	8 ),7N{{<#>>
 !N`~/E'TU++DD[`	 h)FF$&*m<dll G 
 
 #t~w!NNN)CRVR_3` ) )$$&&) )  
 "++Aq11 "))#wGGmmK00D.00r<   r   )
r2   r3   r4   r6   r   r   r	   r   r   r   r   s   @r=   r   r     s         488<1526"'w1 w1|w1 #5<0w1 !u|!45	w1
 !.w1 "%,/w1  w1 
u|Xel3XeEL>Q5RR	Sw1 w1 w1 w1 w1 w1 w1 w1 w1 w1r<   r   )eagersdpaflash_attention_2c                   "    e Zd Zdef fdZ	 	 	 	 	 	 	 	 ddej        deej                 deej                 d	eej                 d
eej                 deej                 deeej                          dee	         dee	         dej        fdZ
 xZS )MusicgenDecoderLayerr   c           	         t                                                       |j        | _        t	          |j                 | j        |j        |j        ddd|          | _        |j	        | _	        t          |j                 | _        |j        | _        t          j        | j                  | _        t	          |j                 | j        |j        |j        dd|          | _        t          j        | j                  | _        t          j        | j        |j        d          | _        t          j        |j        | j        d          | _        t          j        | j                  | _        d S )NTF)r   r   r   r   r   r   r   )r   r   r   r   r   )rS   rT   hidden_sizer   MUSICGEN_ATTENTION_CLASSES_attn_implementationnum_attention_headsattention_dropout	self_attnr   r   activation_functionactivation_fnactivation_dropoutra   	LayerNormself_attn_layer_normencoder_attnencoder_attn_layer_normr   ffn_dimfc1fc2final_layer_normrV   r   rW   s     r=   rT   zMusicgenDecoderLayer.__init__B  s9   +3F4OPn0,
 
 
 ~#F$>?"(";$&L$@$@!6v7RSN&,
 
 
 (*|DN'C'C$9T^V^%HHH9V^T^%HHH "T^ < <r<   NFTr   r0   encoder_hidden_statesencoder_attention_maskr   cross_attn_layer_head_maskr   r   	use_cacher   c
                 x   |}
|                      |          }|
|dd         nd}|                     |||||          \  }}}t          j                            || j        | j                  }|
|z   }d}d}|z|}
|                     |          }|
|dd         nd}|                     ||||||          \  }}}t          j                            || j        | j                  }|
|z   }||z   }|}
|                     |          }| 	                    | 
                    |                    }t          j                            || j        | j                  }|                     |          }t          j                            || j        | j                  }|
|z   }|f}|r|||fz  }|	r||fz  }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        Nr$   )r   r   r0   r   r   r   r   )r   r   r0   r   r   r   )r   r   ra   r   r   r   r   r   r   r   r   r   r   )rV   r   r0   r   r   r   r   r   r   r   residualself_attn_past_key_valueself_attn_weightspresent_key_valuecross_attn_present_key_valuecross_attn_weightscross_attn_past_key_valueoutputss                     r=   r   zMusicgenDecoderLayer.forwardb  s,   < !11-@@ :H9S>"1"#5#5Y] >Bnn'3)+/ ?M ?
 ?
;(*; --mt|VZVc-dd =0 (,$! ,$H 88GGM @N?Yrss(;(;_c%NRN_N_+!65 :8"3 O` O OKM-/K M11-4<Z^Zg1hhM$}4M !24P P !--m<<**488M+B+BCC--mt?Vaean-oo//--mt|VZVc-dd =0 " 	?)+=>>G 	,)++Gr<   )NNNNNNFT)r2   r3   r4   r*   rT   r6   r   r   r	   r   r   r   r   s   @r=   r   r   A  s)       =4 = = = = = =F 268<9=26=A8<,1$(W W|W !.W  (5	W
 !) 6W "%,/W %-U\$:W !u|!45W $D>W D>W 
W W W W W W W Wr<   r   c                   4    e Zd ZdZeZdZdZddgZdZ	dZ
d ZdS )MusicgenPreTrainedModelz
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    modelTr   r   c                    | j         j        }t          |t          j        t          j        f          rJ|j        j                            d|           |j	         |j	        j        
                                 d S d S t          |t          j                  rS|j        j                            d|           |j        -|j        j        |j                 
                                 d S d S d S )Nr   )r   std)r   initializer_factor
isinstancera   r   Conv1dr   datanormal_r   zero_	Embeddingpadding_idx)rV   moduler
  s      r=   _init_weightsz%MusicgenPreTrainedModel._init_weights  s    k,fry")455 	?M&&CS&999{& &&((((( '&-- 	?M&&CS&999!-"6#56<<>>>>>	? 	?--r<   N)r2   r3   r4   r5   r*   config_classbase_model_prefixsupports_gradient_checkpointing_no_split_modules_supports_flash_attn_2_supports_sdpar  r;   r<   r=   r  r    sW         
 )L&*#/1DE!N	? 	? 	? 	? 	?r<   r  u  

    The Musicgen model was proposed in [Simple and Controllable Music Generation](https://arxiv.org/abs/2306.05284) by
    Jade Copet, Felix Kreuk, Itai Gat, Tal Remez, David Kant, Gabriel Synnaeve, Yossi Adi, Alexandre Défossez. It is an
    encoder-decoder transformer trained on the task of conditional music generation.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`MusicgenConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MUSICGEN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.

            Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
            such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            <Tip warning={true}>

            The `decoder_input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
            target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
            you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
            frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
            target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
            `decoder_input_ids`.

            </Tip>

        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
            1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.

            If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
            of `inputs_embeds`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

MUSICGEN_DECODER_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size * num_codebooks, sequence_length)`):
            Indices of input sequence tokens in the vocabulary, corresponding to the sequence of audio codes.

            Indices can be obtained by encoding an audio prompt with an audio encoder model to predict audio codes,
            such as with the [`EncodecModel`]. See [`EncodecModel.encode`] for details.

            [What are input IDs?](../glossary#input-ids)

            <Tip warning={true}>

            The `input_ids` will automatically be converted from shape `(batch_size * num_codebooks,
            target_sequence_length)` to `(batch_size, num_codebooks, target_sequence_length)` in the forward pass. If
            you obtain audio codes from an audio encoding model, such as [`EncodecModel`], ensure that the number of
            frames is equal to 1, and that you reshape the audio codes from `(frames, batch_size, num_codebooks,
            target_sequence_length)` to `(batch_size * num_codebooks, target_sequence_length)` prior to passing them as
            `input_ids`.

            </Tip>

        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of
            the decoder.
        encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
            Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
            selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
            blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
c                       e Zd ZdZdef fdZd Zd Z ee	          	 	 	 	 	 	 	 	 	 	 	 	 dde
j        dee
j                 d	ee
j                 d
ee
j                 dee
j                 dee
j                 deeee
j                                   dee
j                 dee         dee         dee         dee         deeef         fd            Z xZS )MusicgenDecoderzw
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`MusicgenDecoderLayer`]
    r   c                    t                                                     j        | _        j        | _        j        | _        j        | _        j        | _        j	        rt          j        j                  nd| _        j        dz   t          j        fdt!          j                  D                       | _        t%          j        j                  | _        t          j        fdt!          j                  D                       | _        t          j        j                  | _        j        | _        d| _        |                                  d S )N      ?r(   c                 D    g | ]}t          j        j                  S r;   )ra   r  r   ).0r   r   r   s     r=   
<listcomp>z,MusicgenDecoder.__init__.<locals>.<listcomp>  s(    ^^^QR\)V%788^^^r<   c                 .    g | ]}t                    S r;   )r   r   r   r   s     r=   r!  z,MusicgenDecoder.__init__.<locals>.<listcomp>  s"    $k$k$ka%9&%A%A$k$k$kr<   F)rS   rT   r   	layerdropmax_position_embeddingsmax_target_positionsr   d_modelnum_codebooksscale_embeddingrj   sqrtembed_scale
vocab_sizera   
ModuleListrangeembed_tokensrN   embed_positionsnum_hidden_layerslayersr   
layer_normr   attn_implementationgradient_checkpointing	post_init)rV   r   r   rW   s    `@r=   rT   zMusicgenDecoder.__init__  sK      ~)$*$B!)#1<B<R[49V%7888X[%)	M^^^^^%H\B]B]^^^
 
  E* 
  

 m$k$k$k$k5QWQiKjKj$k$k$kll,v'9::#)#> &+#r<   c                     | j         S rR   r/  rV   s    r=   get_input_embeddingsz$MusicgenDecoder.get_input_embeddings        r<   c                     || _         d S rR   r8  rV   values     r=   set_input_embeddingsz$MusicgenDecoder.set_input_embeddings  s    !r<   Nr>   r0   r   r   	head_maskcross_attn_head_maskpast_key_valuesinputs_embedsr   r   output_hidden_statesreturn_dictr   c                      |
|
n j         j        }
||n j         j        }|	|	n j         j        }	||n j         j        }||t          d          |7|                    d j        |j        d                     j        \  }}}||f}n?|.|	                                d d         }|d d d d dd f          nt          d          ||d         d         j        d         nd}|)t            fdt          |          D                       } j        dk    r|d|v r|nd }n4 j        dk    r||
st          ||||          }nt          ||||          }|`|^ j        dk    r	d|v r|nd }nJ j        dk    r"| |
st          ||j        |d         	          }nt#          ||j        |d         	          }                      |          }||                    |j                  z   }t*          j                            | j         j        
          } j        r% j        r|	rt4                              d           d}	|rdnd }|
rdnd }|
r|dnd }|	rdnd }t9          ||gddg          D ]z\  }}|s|	                                d         t;           j                  k    rCt          d| dt;           j                   d|	                                d          d          {t?           j                  D ]\  }}|r||fz  }tA          j!        dd          } j        r| j"        k     r5|||         nd } j        r? j        r8 #                    |j$        |||||||         nd |||         nd d |
|	
  
        }n( ||||||||         nd |||         nd ||
|		  	        }|d         }|	r|||
rdnd         fz  }|
r||d         fz  }|||d         fz  } %                    |          }|r||fz  }|	r|nd }|stM          d |||||fD                       S tO          |||||          S )NzTYou cannot specify both decoder_input_ids and decoder_inputs_embeds at the same timerB   zEYou have to specify either decoder_input_ids or decoder_inputs_embedsr   r$   c                 P    g | ]"} j         |         d d |f                   #S rR   r8  )r   codebookinputrV   s     r=   r!  z+MusicgenDecoder.forward.<locals>.<listcomp>  s:     v v vU]!<!28!<U111h;=O!P!P v v vr<   r   r   )r   r   z[`use_cache=True` is incompatible with gradient checkpointing`. Setting `use_cache=False`...Fr;   r@  rA  zThe `z` should be specified for z layers, but it is for r   r(   )r0   r   r   r   r   r   r   r   r   c              3      K   | ]}||V  	d S rR   r;   )r   vs     r=   	<genexpr>z*MusicgenDecoder.forward.<locals>.<genexpr>e  s0        =  === r<   last_hidden_staterB  r   
attentionscross_attentions)(r   r   rD  r   use_return_dictrI   r   r(  rG   rz   sumr.  r4  r   r   r   r\   r   r0  r`   r]   ra   r   r   r   r5  r   r   ziplenr2  	enumeraterandomuniformr$  _gradient_checkpointing_funcr   r3  tupler   )!rV   r>   r0   r   r   r@  rA  rB  rC  r   r   rD  rE  r~   r(  r   input_shaperx   	positionsr   all_hidden_statesall_self_attnsall_cross_attentionsnext_decoder_cacher   	mask_nameidxdecoder_layerdropout_probabilityr   layer_outputs
next_cacherI  s!   `                               @r=   r   zMusicgenDecoder.forward  sN     2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B]  ]%>sttt"%%b$*<iob>QRRE*/+'C.KK&',,..ss3K!!!!QQQ),EEdeee DSC^!3A!6!<Q!?!?de  v v v v vafgtauau v v vwwM#':::0>0JqTbObOb^^imNN%//I4EN_4E H&	 NN ?]<R N
 !,1G1S'+>>>CDH^C^C^)?)?dh&&)V338L8T]n8T *M*!''O* * *&& *D*M,?UW* * *&
 ((0FGG	%	]5I(J(JJ--mt|VZVc-dd& 	"4= 	" "##q   "	 #7@BBD0:d&7h<Q<]rrdh#,6RR$ %(4H(IKYoKp$q$q 	 	 Iy$>>##A&#dk*:*:::$3	 3 3SEUEU 3 3%NN,,Q/3 3 3   #,DK"8"8 .	@ .	@C# 6!m%55!"(.A"6"6} "5"F"F5D5P_S11VZN* t}  $ A A!)!")*&/&;IcNN1E1Q(--W[%! ! !.!#1*?+A7@7LYs^^RV5I5U,S11[_#1&7'! ! ! *!,M V"}:K5RQQQR'S&UU"  @=#3"55(4(]1-=,??(66   	2-!11+4>''$
 	  '5FXlm     
 9+&+%1
 
 
 	
r<   NNNNNNNNNNNN)r2   r3   r4   r5   r*   rT   r:  r?  r   !MUSICGEN_DECODER_INPUTS_DOCSTRINGr6   r9   r   r   r7   r	   r   r
   r   r   r   r   s   @r=   r  r    s        4      6! ! !" " " +*+LMM '+15=A=A,07;EI59$(,0/3&*l
 l
#l
 !.l
  ((9:	l

 !))9 :l
 EL)l
 'u|4l
 "%e.?(@"ABl
   12l
 D>l
 $D>l
 'tnl
 d^l
 
u??	@l
 l
 l
 NMl
 l
 l
 l
 l
r<   r  z^The bare Musicgen decoder model outputting raw hidden-states without any specific head on top.c                       e Zd Zdef fdZd Zd Zd Z ee	          	 	 	 	 	 	 	 	 	 	 	 	 dde
j        dee
j                 d	ee
j                 d
ee
j                 dee
j                 dee
j                 deeee
j                                   dee
j                 dee         dee         dee         dee         deeef         fd            Z xZS )MusicgenModelr   c                     t                                          |           t          |          | _        |                                  d S rR   )rS   rT   r  decoderr6  r   s     r=   rT   zMusicgenModel.__init__x  s@       &v..r<   c                     | j         j        S rR   rk  r/  r9  s    r=   r:  z"MusicgenModel.get_input_embeddings~  s    |((r<   c                     || j         _        d S rR   rm  r=  s     r=   r?  z"MusicgenModel.set_input_embeddings  s    $)!!!r<   c                     | j         S rR   rk  r9  s    r=   get_decoderzMusicgenModel.get_decoder  
    |r<   Nr>   r0   r   r   r@  rA  rB  rC  r   r   rD  rE  r   c                 &   |
|
n| j         j        }
||n| j         j        }|	|	n| j         j        }	||n| j         j        }|                     |||||||||	|
||          }|s|S t          |j        |j        |j	        |j
        |j                  S )N)r>   r0   r   r   r@  rA  rB  rC  r   r   rD  rE  rM  )r   r   rD  r   rQ  rk  r   rN  rB  r   rO  rP  )rV   r>   r0   r   r   r@  rA  rB  rC  r   r   rD  rE  decoder_outputss                 r=   r   zMusicgenModel.forward  s      2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B] ,,)#9"7!5+'/!5# ' 
 
  	#""8-?+;)7&1,=
 
 
 	
r<   rf  )r2   r3   r4   r*   rT   r:  r?  rq  r   rg  r6   r9   r   r   r7   r	   r   r
   r   r   r   r   s   @r=   ri  ri  s  s       
4      ) ) )* * *   +*+LMM '+15=A=A,07;EI59$(,0/3&*/
 /
#/
 !./
  ((9:	/

 !))9 :/
 EL)/
 'u|4/
 "%e.?(@"AB/
   12/
 D>/
 $D>/
 'tn/
 d^/
 
u??	@/
 /
 /
 NM/
 /
 /
 /
 /
r<   ri  zAThe MusicGen decoder model with a language modelling head on top.c            !           e Zd Zdef fdZd Zd Zd Zd Zd Z	d Z
 ee           eee	          	 	 	 	 	 	 	 	 	 	 	 	 	 d(dej        deej                 deej                 deej                 deej                 deej                 deeeej                                   deej                 deej                 dee         dee         dee         dee         deeef         fd                        Z	 	 	 	 	 	 	 	 	 d)dZd*dej        dedefdZed             Z ej                    	 	 	 	 	 	 d+d eej                 d!ee          d"ee!         d#ee"         d$ee         d%ed&         fd'            Z# xZ$S ),MusicgenForCausalLMr   c                 "   t                                                     t                    | _        j        | _        t          j        fdt          j                  D                       | _        | 	                                 d S )Nc                 R    g | ]#}t          j        j        j        d           $S )Fr   )ra   r   r   r,  r#  s     r=   r!  z0MusicgenForCausalLM.__init__.<locals>.<listcomp>  s0    oooaRYv)6+<5IIIooor<   )
rS   rT   ri  r  r(  ra   r-  r.  lm_headsr6  r   s    `r=   rT   zMusicgenForCausalLM.__init__  s       "6**
#1ooooSXY_YmSnSnooo
 

 	r<   c                 $    | j         j        j        S rR   r  rk  r/  r9  s    r=   r:  z(MusicgenForCausalLM.get_input_embeddings  s    z!..r<   c                 (    || j         j        _        d S rR   r{  r=  s     r=   r?  z(MusicgenForCausalLM.set_input_embeddings  s    */
'''r<   c                     | j         S rR   ry  r9  s    r=   get_output_embeddingsz)MusicgenForCausalLM.get_output_embeddings  s
    }r<   c                     || _         d S rR   r~  rV   new_embeddingss     r=   set_output_embeddingsz)MusicgenForCausalLM.set_output_embeddings  s    &r<   c                     || j         _        d S rR   r  rk  )rV   rk  s     r=   set_decoderzMusicgenForCausalLM.set_decoder  s    $
r<   c                     | j         j        S rR   r  r9  s    r=   rq  zMusicgenForCausalLM.get_decoder  s    z!!r<   output_typer  Nr>   r0   r   r   r@  rA  rB  rC  labelsr   r   rD  rE  r   c                     ||n| j         j        }|	)|'|%t          |	| j         j        | j         j                  }|                     |||||||||
|||          }|d         t          j        fd| j        D             d          }d}|	|dddd|	j	        d          df         }t                      }t          j        g | j                  }|	                    |	| j         j        k    d          }	t          | j         j                  D ]}}|dd|f                                                             d	|j	        d	                   }|	d
|f                                                             d	          }| |||          z  }~|| j         j        z  } |j        d	g|j	        dd         R  }|s|f|dd         z   }||f|z   n|S t'          |||j        |j        |j        |j                  S )a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length, num_codebooks)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
        Returns:
        N)r0   r   r   r@  rA  rB  rC  r   r   rD  rE  r   c                 &    g | ]} |          S r;   r;   )r   headr   s     r=   r!  z/MusicgenForCausalLM.forward.<locals>.<listcomp>  s#     O O Om!4!4 O O Or<   r(   rh   r]   rD   rB   .r$   )losslogitsrB  r   rO  rP  )r   rQ  rL   r?   bos_token_idr  r6   stackry  rG   r   rt   r]   masked_fillr.  r(  r   rs   r   r   rB  r   rO  rP  )rV   r>   r0   r   r   r@  rA  rB  rC  r  r   r   rD  rE  r  	lm_logitsr  r  loss_fctrH  codebook_logitscodebook_labelsoutputr   s                          @r=   r   zMusicgenForCausalLM.forward  sb   4 &1%<kk$+B]Y%6=;P*64;3KT[MeffI**)"7#9!5+'/!5#  
 
  
K O O O O O O OUVWWW	 qqq!!!fl1o%5%7%778F'))H;r$+666D ''$+2J(JDQQF "$+";<< C C"(H"5"@"@"B"B"G"GFLY[L\"]"]"(h"7"B"B"D"D"I"I""M"M/BBB$+33D &I%b?9?122+>???	 	F\GABBK/F)-)9TGf$$vE0#3!/)$5
 
 
 	
r<   Tc           	      .   |	/|                      || j        j        | j        j                  \  }}	|                     ||	          }|
2|
dk    r,|                    d          }||                    d          }||d d dd f         }||||||||dS )Nr?   
max_lengthr(   r$   r(   rB   )r>   r0   r   r   r@  rA  rB  r   )build_delay_pattern_maskgeneration_configr?   r  apply_delay_pattern_maskrepeat)rV   r>   r0   r   r   r@  rA  rB  r   delay_pattern_maskr1   r   s               r=   prepare_inputs_for_generationz1MusicgenForCausalLM.prepare_inputs_for_generation6  s     %,0,I,I!3@1< -J - -)I) 11)=OPP	%.1*<*< "((00I)!/!6!6v!>!>&!!!!RSS&)I #,%:&<"$8."	
 	
 		
r<   r?   r  c                 6   |                     d| j        |j        d                   }|j        \  }}}||n| j        j        }t          j        |||ft
          j        |j                  dz  }| j	        j
        dk    r|dz  n|}|d|z  dz
  k     r2|                     ||z  d          |                     ||z  d          fS t          |          D ]p}	| j	        j
        dk    r|dd|	f         |dd|	|	||	z   f<   ,|ddd|	z  f         |ddd|	z  |	||	z   f<   |ddd|	z  dz   f         |ddd|	z  dz   |	||	z   f<   qt          j        t          j        ||ft
          j                  ||z
  dz             }
|
t          j        t          j        ||ft
          j                            z   }
| j	        j
        dk    r|
                    dd	          }
|
                    |j                   }||z  | |z  z   }|dddddf         }|dk                                    dddf         }t%          |          dk    rt'          |          }n|}|                     ||z  d          }|d
d|f                              ||z  d          }||fS )aD  Build a delayed pattern mask to the input_ids. Each codebook is offset by the previous codebook by
        one, giving a delayed pattern mask at the start of sequence and end of sequence. Take the example where there
        are 4 codebooks and a max sequence length of 8, we have the delayed pattern mask of shape `(codebooks,
        seq_len)`:
        - [P, -1, -1, -1, -1, P, P, P]
        - [P, P, -1, -1, -1, -1, P, P]
        - [P, P, P, -1, -1, -1, -1, P]
        - [P, P, P, P, -1, -1, -1, -1]
        where P is the special padding token id and -1 indicates that the token is valid for prediction. If we include
        a prompt (decoder input ids), the -1 positions indicate where new tokens should be predicted. Otherwise, the
        mask is set to the value in the prompt:
        - [P, a, b, -1, -1, P, P, P]
        - [P, P, c, d, -1, -1, P, P]
        - [P, P, P, e, f, -1, -1, P]
        - [P, P, P, P, g, h, -1, -1]
        where a-h indicate the input prompt (decoder input ids) that are offset by 1. Now, we only override the -1
        tokens in our prediction.
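        As an illustrative sketch (identifiers as in this module; `model` is any `MusicgenForCausalLM` instance),
        the mask is built once before generation and re-applied at every decoding step:

        ```python
        >>> # trim the prompt to the first position that still needs predicting, and keep the full pattern mask
        >>> input_ids, delay_pattern_mask = model.build_delay_pattern_mask(input_ids, pad_token_id, max_length=8)
        >>> # at each step, every position that is not -1 in the mask is overridden with the mask value
        >>> input_ids = model.apply_delay_pattern_mask(input_ids, delay_pattern_mask)
        ```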
        """
        # (bsz * num_codebooks, seq_len) -> (bsz, num_codebooks, seq_len)
        input_ids = input_ids.reshape(-1, self.num_codebooks, input_ids.shape[-1])
        bsz, num_codebooks, seq_len = input_ids.shape

        max_length = max_length if max_length is not None else self.generation_config.max_length
        input_ids_shifted = (
            torch.ones((bsz, num_codebooks, max_length), dtype=torch.long, device=input_ids.device) * -1
        )

        # in the stereo case, the left/right channels each carry half of the codebooks
        channel_codebooks = num_codebooks // 2 if self.config.audio_channels == 2 else num_codebooks
        # we only apply the mask if we have a large enough seq len - otherwise we return as is
        if max_length < 2 * channel_codebooks - 1:
            return input_ids.reshape(bsz * num_codebooks, -1), input_ids_shifted.reshape(bsz * num_codebooks, -1)

        # fill the shifted ids with the prompt entries, offset by the codebook idx
        for codebook in range(channel_codebooks):
            if self.config.audio_channels == 1:
                # mono channel - loop over the codebooks one-by-one
                input_ids_shifted[:, codebook, codebook : seq_len + codebook] = input_ids[:, codebook]
            else:
                # left/right channels are interleaved in the generated codebooks, so handle one then the other
                input_ids_shifted[:, 2 * codebook, codebook : seq_len + codebook] = input_ids[:, 2 * codebook]
                input_ids_shifted[:, 2 * codebook + 1, codebook : seq_len + codebook] = input_ids[:, 2 * codebook + 1]

        # construct a pattern mask that marks the padding positions: the upper triangular part is the EOS
        # padding, the lower triangular part the BOS padding
        delay_pattern = torch.triu(
            torch.ones((channel_codebooks, max_length), dtype=torch.bool), diagonal=max_length - channel_codebooks + 1
        )
        delay_pattern = delay_pattern + torch.tril(torch.ones((channel_codebooks, max_length), dtype=torch.bool))

        if self.config.audio_channels == 2:
            # for left/right channels we duplicate every row of the pattern mask in an interleaved fashion
            delay_pattern = delay_pattern.repeat_interleave(2, dim=0)

        mask = ~delay_pattern.to(input_ids.device)
        input_ids = mask * input_ids_shifted + ~mask * pad_token_id

        # find the first position to start generating - this is the first place we have a -1 token, and it
        # will always be in the first codebook (since the first codebook has no offset)
        first_codebook_ids = input_ids[:, 0, :]
        start_ids = (first_codebook_ids == -1).nonzero()[:, 1]
        if len(start_ids) > 0:
            first_start_id = min(start_ids)
        else:
            # we have no tokens that need to be filled - return the entire matrix of input ids
            first_start_id = seq_len

        # (bsz, num_codebooks, seq_len) -> (bsz * num_codebooks, seq_len)
        pattern_mask = input_ids.reshape(bsz * num_codebooks, -1)
        input_ids = input_ids[..., :first_start_id].reshape(bsz * num_codebooks, -1)
        return input_ids, pattern_mask

    @staticmethod
    def apply_delay_pattern_mask(input_ids, decoder_pad_token_mask):
        """Apply a delay pattern mask to the decoder input ids, only preserving predictions where
        the mask is set to -1, and otherwise setting to the value detailed in the mask."""
        seq_len = input_ids.shape[-1]
        decoder_pad_token_mask = decoder_pad_token_mask[..., :seq_len]
        input_ids = torch.where(decoder_pad_token_mask == -1, input_ids, decoder_pad_token_mask)
        return input_ids

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        synced_gpus: Optional[bool] = None,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ):
        """
        Generates sequences of token ids for models with a language modeling head.

        <Tip warning={true}>

        Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
        model's default generation configuration. You can override any `generation_config` by passing the corresponding
        parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`.

        For an overview of generation strategies and code examples, check out the [following
        guide](./generation_strategies).

        </Tip>

        Parameters:
            inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
                method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format `input_ids`. For encoder-decoder models *inputs* can represent any of
                `input_ids`, `input_values`, `input_features`, or `pixel_values`.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            logits_processor (`LogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logit processor is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            stopping_criteria (`StoppingCriteriaList`, *optional*):
                Custom stopping criteria that complement the default stopping criteria built from arguments and a
                generation config. If a stopping criteria is passed that is already created with the arguments or a
                generation config an error is thrown. This feature is intended for advanced users.
            synced_gpus (`bool`, *optional*, defaults to `False`):
                Whether to continue running the while loop until max_length (needed to avoid deadlocking with
                `FullyShardedDataParallel` and DeepSpeed ZeRO Stage 3).
            streamer (`BaseStreamer`, *optional*):
                Streamer object that will be used to stream the generated sequences. Generated tokens are passed
                through `streamer.put(token_ids)` and the streamer is responsible for any further processing.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
                forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
                specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.

        Return:
            [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`
            or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.

                If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateDecoderOnlyOutput`],
                    - [`~generation.GenerateBeamDecoderOnlyOutput`]

                If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
                [`~utils.ModelOutput`] types are:

                    - [`~generation.GenerateEncoderDecoderOutput`],
                    - [`~generation.GenerateBeamEncoderDecoderOutput`]
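
        Example (an illustrative sketch — the standalone decoder generates audio codes rather than waveforms,
        and it is assumed here to be taken from a composite checkpoint):

        ```python
        >>> from transformers import MusicgenForConditionalGeneration
        >>> import torch

        >>> decoder = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small").decoder
        >>> pad_token_id = decoder.generation_config.pad_token_id

        >>> # one pad token per codebook acts as the "null" prompt
        >>> input_ids = torch.ones((decoder.num_codebooks, 1), dtype=torch.long) * pad_token_id
        >>> audio_codes = decoder.generate(input_ids, max_new_tokens=64)  # (bsz, num_codebooks, seq_len)
        ```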
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # all unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

        # 2. Set generation parameters if not already defined
        input_ids, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = input_ids.shape[0] // self.num_codebooks
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=input_ids.device)

        # 3. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache
        model_kwargs["guidance_scale"] = generation_config.guidance_scale

        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                input_ids, generation_config._pad_token_tensor, generation_config._eos_token_tensor
            )

        # 4. Prepare `max_length` depending on the other stopping criteria
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=input_ids,
            input_ids_length=input_ids_length,
        )

        # 5. Build the delay pattern mask for offsetting each codebook prediction by 1
        input_ids, delay_pattern_mask = self.build_delay_pattern_mask(
            input_ids,
            pad_token_id=generation_config._decoder_start_token_tensor,
            max_length=generation_config.max_length,
        )

        if streamer is not None:
            streamer.put(input_ids.cpu())

        # stash the delay mask so that it is re-applied at each generation step
        model_kwargs["delay_pattern_mask"] = delay_pattern_mask

        # 6. Determine the generation mode (classifier-free guidance adds a logits processor)
        generation_mode = generation_config.get_generation_mode()
        if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
            logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
            generation_config.guidance_scale = None

        # 7. Prepare the logits processors and stopping criteria
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=input_ids,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
            device=input_ids.device,
        )
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        # 8. Run greedy search or sampling (beam methods are not supported for MusicGen)
        if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                **model_kwargs,
            )
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )
        else:
            raise ValueError(
                "Got incompatible mode for generation, should be one of greedy or sampling. "
                "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
            )

        if generation_config.return_dict_in_generate:
            output_ids = outputs.sequences
        else:
            output_ids = outputs

        # apply the pattern mask to the final ids, then revert the delay pattern by filtering the pad token ids
        output_ids = self.apply_delay_pattern_mask(output_ids, model_kwargs["delay_pattern_mask"])
        output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
            batch_size, self.num_codebooks, -1
        )

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_ids
            return outputs
        return output_ids

@add_start_docstrings(
    "The composite MusicGen model with a text encoder, audio encoder and Musicgen decoder, "
    "for music generation tasks with one or both of text and audio prompts.",
    MUSICGEN_START_DOCSTRING,
)
class MusicgenForConditionalGeneration(PreTrainedModel, GenerationMixin):
    config_class = MusicgenConfig
    base_model_prefix = "encoder_decoder"
    main_input_name = "input_ids"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def __init__(
        self,
        config: Optional[MusicgenConfig] = None,
        text_encoder: Optional[PreTrainedModel] = None,
        audio_encoder: Optional[PreTrainedModel] = None,
        decoder: Optional[MusicgenForCausalLM] = None,
    ):
        if config is None and (text_encoder is None or audio_encoder is None or decoder is None):
            raise ValueError(
                "Either a configuration has to be provided, or all three of text encoder, audio encoder and "
                "MusicGen decoder."
            )
        if config is None:
            config = MusicgenConfig.from_sub_models_config(text_encoder.config, audio_encoder.config, decoder.config)
        elif not isinstance(config, self.config_class):
            raise ValueError(f"Config: {config} has to be of type {self.config_class}")

        if config.decoder.cross_attention_hidden_size is not None:
            if config.decoder.cross_attention_hidden_size != config.text_encoder.hidden_size:
                raise ValueError(
                    "If `cross_attention_hidden_size` is specified in the MusicGen decoder's configuration, it has to"
                    f" be equal to the text encoder's `hidden_size`. Got {config.decoder.cross_attention_hidden_size}"
                    f" for `config.decoder.cross_attention_hidden_size` and {config.text_encoder.hidden_size} for"
                    " `config.text_encoder.hidden_size`."
                )

        # initialize with config
        super().__init__(config)

        if text_encoder is None:
            from ..auto.modeling_auto import AutoModelForTextEncoding

            text_encoder = AutoModelForTextEncoding.from_config(config.text_encoder)

        if audio_encoder is None:
            from ..auto.modeling_auto import AutoModel

            audio_encoder = AutoModel.from_config(config.audio_encoder)

        if decoder is None:
            decoder = MusicgenForCausalLM._from_config(config.decoder)

        self.text_encoder = text_encoder
        self.audio_encoder = audio_encoder
        self.decoder = decoder

        # make sure the individual models' configs refer to the shared config so that config updates stay in sync
        if self.text_encoder.config.to_dict() != self.config.text_encoder.to_dict():
            logger.warning(
                f"Config of the text_encoder: {self.text_encoder.__class__} is overwritten by shared text_encoder"
                f" config: {self.config.text_encoder}"
            )
        if self.audio_encoder.config.to_dict() != self.config.audio_encoder.to_dict():
            logger.warning(
                f"Config of the audio_encoder: {self.audio_encoder.__class__} is overwritten by shared audio_encoder"
                f" config: {self.config.audio_encoder}"
            )
        if self.decoder.config.to_dict() != self.config.decoder.to_dict():
            logger.warning(
                f"Config of the decoder: {self.decoder.__class__} is overwritten by shared decoder config:"
                f" {self.config.decoder}"
            )

        self.text_encoder.config = self.config.text_encoder
        self.audio_encoder.config = self.config.audio_encoder
        self.decoder.config = self.config.decoder

        # text encoder outputs might need to be projected to a different dimension for the decoder
        if (
            self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            self.enc_to_dec_proj = nn.Linear(self.text_encoder.config.hidden_size, self.decoder.config.hidden_size)

        if self.text_encoder.get_output_embeddings() is not None:
            raise ValueError(
                f"The encoder {self.text_encoder} should not have a LM head. Please use a model without an LM head."
            )

        decoder_signature = set(inspect.signature(self.decoder.forward).parameters.keys())
        if "encoder_hidden_states" not in decoder_signature:
            raise ValueError(
                "The selected decoder is not prepared for the encoder hidden states to be passed. Please see the "
                "following discussion on GitHub: https://github.com/huggingface/transformers/issues/23350"
            )

        # tie text encoder and decoder weights if the config says so
        self.tie_weights()

    def tie_weights(self):
        if self.config.tie_encoder_decoder:
            # tie the text encoder and the decoder base model
            decoder_base_model_prefix = self.decoder.base_model_prefix
            tied_weights = self._tie_encoder_decoder_weights(
                self.text_encoder,
                self.decoder._modules[decoder_base_model_prefix],
                self.decoder.base_model_prefix,
                "text_encoder",
            )
            self._dynamic_tied_weights_keys = tied_weights

    def get_audio_encoder(self):
        return self.audio_encoder

    def get_text_encoder(self):
        return self.text_encoder

    def get_encoder(self):
        # get the text encoder to compute the encoder hidden-states for generation
        return self.get_text_encoder()

    def get_decoder(self):
        return self.decoder

    def get_input_embeddings(self):
        return self.text_encoder.get_input_embeddings()

    def get_output_embeddings(self):
        return self.decoder.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        return self.decoder.set_output_embeddings(new_embeddings)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Example:

        ```python
        >>> from transformers import MusicgenForConditionalGeneration

        >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
        ```"""
        if kwargs.get("_fast_init", False):
            logger.warning(
                "Fast initialization is currently not supported for MusicgenForConditionalGeneration. "
                "Falling back to slow initialization..."
            )
        kwargs["_fast_init"] = False

        return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)

    @classmethod
    def from_sub_models_pretrained(
        cls,
        text_encoder_pretrained_model_name_or_path: str = None,
        audio_encoder_pretrained_model_name_or_path: str = None,
        decoder_pretrained_model_name_or_path: str = None,
        *model_args,
        **kwargs,
    ) -> PreTrainedModel:
        r"""
        Instantiate a text encoder, an audio encoder, and a MusicGen decoder from one, two or three base classes of the
        library from pretrained model checkpoints.


        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you need to first set it back in training mode with `model.train()`.

        Params:
            text_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the text encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            audio_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the audio encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            decoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the decoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.

            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                `output_attentions=True`).

                - To update the text encoder configuration, use the prefix *text_encoder_* for each configuration
                  parameter.
                - To update the audio encoder configuration, use the prefix *audio_encoder_* for each configuration
                  parameter.
                - To update the decoder configuration, use the prefix *decoder_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import MusicgenForConditionalGeneration

        >>> # initialize a musicgen model from a t5 text encoder, encodec audio encoder, and musicgen decoder
        >>> model = MusicgenForConditionalGeneration.from_sub_models_pretrained(
        ...     text_encoder_pretrained_model_name_or_path="google-t5/t5-base",
        ...     audio_encoder_pretrained_model_name_or_path="facebook/encodec_24khz",
        ...     decoder_pretrained_model_name_or_path="facebook/musicgen-small",
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./musicgen-ft")
        >>> # load fine-tuned model
        >>> model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-ft")
        ```"""
        kwargs_text_encoder = {
            argument[len("text_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }
        kwargs_audio_encoder = {
            argument[len("audio_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("audio_encoder_")
        }
        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        # remove the prefixed kwargs from the shared kwargs
        for key in kwargs_text_encoder.keys():
            del kwargs["text_encoder_" + key]
        for key in kwargs_audio_encoder.keys():
            del kwargs["audio_encoder_" + key]
        for key in kwargs_decoder.keys():
            del kwargs["decoder_" + key]

        # load and initialize the sub-models; the encoder/decoder distinction is made through the `is_decoder` flag
        text_encoder = kwargs_text_encoder.pop("model", None)
        if text_encoder is None:
            if text_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `text_encoder_model` is not defined as an argument, a "
                    "`text_encoder_pretrained_model_name_or_path` has to be defined."
                )
            if "config" not in kwargs_text_encoder:
                encoder_config, kwargs_text_encoder = AutoConfig.from_pretrained(
                    text_encoder_pretrained_model_name_or_path, **kwargs_text_encoder, return_unused_kwargs=True
                )
                if encoder_config.is_decoder is True or encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {text_encoder_pretrained_model_name_or_path} as a text_encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    encoder_config.is_decoder = False
                    encoder_config.add_cross_attention = False
                kwargs_text_encoder["config"] = encoder_config

            text_encoder = AutoModel.from_pretrained(
                text_encoder_pretrained_model_name_or_path, *model_args, **kwargs_text_encoder
            )

        audio_encoder = kwargs_audio_encoder.pop("model", None)
        if audio_encoder is None:
            if audio_encoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `audio_encoder_model` is not defined as an argument, an "
                    "`audio_encoder_pretrained_model_name_or_path` has to be defined."
                )
            if "config" not in kwargs_audio_encoder:
                audio_encoder_config, kwargs_audio_encoder = AutoConfig.from_pretrained(
                    audio_encoder_pretrained_model_name_or_path, **kwargs_audio_encoder, return_unused_kwargs=True
                )
                if audio_encoder_config.is_decoder is True or audio_encoder_config.add_cross_attention is True:
                    logger.info(
                        f"Initializing {audio_encoder_pretrained_model_name_or_path} as an audio_encoder model "
                        "from a decoder model. Cross-attention and causal mask are disabled."
                    )
                    audio_encoder_config.is_decoder = False
                    audio_encoder_config.add_cross_attention = False
                kwargs_audio_encoder["config"] = audio_encoder_config

            audio_encoder = AutoModel.from_pretrained(
                audio_encoder_pretrained_model_name_or_path, *model_args, **kwargs_audio_encoder
            )

        decoder = kwargs_decoder.pop("model", None)
        if decoder is None:
            if decoder_pretrained_model_name_or_path is None:
                raise ValueError(
                    "If `decoder_model` is not defined as an argument, a `decoder_pretrained_model_name_or_path` "
                    "has to be defined."
                )
            if "config" not in kwargs_decoder:
                decoder_config, kwargs_decoder = AutoConfig.from_pretrained(
                    decoder_pretrained_model_name_or_path, **kwargs_decoder, return_unused_kwargs=True
                )
                if isinstance(decoder_config, MusicgenConfig):
                    decoder_config = decoder_config.decoder

                if decoder_config.is_decoder is False or decoder_config.add_cross_attention is False:
                    logger.info(
                        f"Initializing {decoder_pretrained_model_name_or_path} as a decoder model. Cross attention"
                        f" layers are added to {decoder_pretrained_model_name_or_path} and randomly initialized if"
                        f" {decoder_pretrained_model_name_or_path}'s architecture allows for cross attention layers."
                    )
                    decoder_config.is_decoder = True
                    decoder_config.add_cross_attention = True
                kwargs_decoder["config"] = decoder_config

            if kwargs_decoder["config"].is_decoder is False or kwargs_decoder["config"].add_cross_attention is False:
                logger.warning(
                    f"Decoder model {decoder_pretrained_model_name_or_path} is not initialized as a decoder. "
                    f"In order to initialize {decoder_pretrained_model_name_or_path} as a decoder, make sure that "
                    "the attributes `is_decoder` and `add_cross_attention` of `decoder_config` passed to "
                    "`.from_sub_models_pretrained(...)` are set to `True` or do not pass a `decoder_config` to "
                    "`.from_sub_models_pretrained(...)`"
                )

            decoder = MusicgenForCausalLM.from_pretrained(decoder_pretrained_model_name_or_path, **kwargs_decoder)

        # instantiate the config with the corresponding kwargs
        config = MusicgenConfig.from_sub_models_config(
            text_encoder.config, audio_encoder.config, decoder.config, **kwargs
        )
        return cls(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder, config=config)

    @add_start_docstrings_to_model_forward(MUSICGEN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        input_values: Optional[torch.FloatTensor] = None,
        padding_mask: Optional[torch.BoolTensor] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, Seq2SeqLMOutput]:
        r"""
        Returns:

        Examples:
        ```python
        >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration
        >>> import torch

        >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
        >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

        >>> inputs = processor(
        ...     text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"],
        ...     padding=True,
        ...     return_tensors="pt",
        ... )

        >>> pad_token_id = model.generation_config.pad_token_id
        >>> decoder_input_ids = (
        ...     torch.ones((inputs.input_ids.shape[0] * model.decoder.num_codebooks, 1), dtype=torch.long)
        ...     * pad_token_id
        ... )

        >>> logits = model(**inputs, decoder_input_ids=decoder_input_ids).logits
        >>> logits.shape  # (bsz * num_codebooks, tgt_len, vocab_size)
        torch.Size([8, 1, 2048])
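        >>> # illustrative: `labels` of shape (bsz, seq_len, num_codebooks) yield the per-codebook CE training loss
        >>> labels = torch.randint(0, 2048, (2, 1, model.decoder.num_codebooks))
        >>> loss = model(**inputs, decoder_input_ids=decoder_input_ids, labels=labels).loss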
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        kwargs_text_encoder = {
            argument[len("text_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("text_encoder_")
        }
        kwargs_audio_encoder = {
            argument[len("audio_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("audio_encoder_")
        }
        kwargs_decoder = {
            argument[len("decoder_") :]: value for argument, value in kwargs.items() if argument.startswith("decoder_")
        }

        if encoder_outputs is None:
            encoder_outputs = self.text_encoder(
                input_ids=input_ids,
                attention_mask=attention_mask,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
                **kwargs_text_encoder,
            )
        elif isinstance(encoder_outputs, tuple):
            encoder_outputs = BaseModelOutput(*encoder_outputs)

        encoder_hidden_states = encoder_outputs[0]

        # optionally project the encoder hidden states to the decoder dimension
        if (
            self.text_encoder.config.hidden_size != self.decoder.config.hidden_size
            and self.decoder.config.cross_attention_hidden_size is None
        ):
            encoder_hidden_states = self.enc_to_dec_proj(encoder_hidden_states)

        if attention_mask is not None:
            encoder_hidden_states = encoder_hidden_states * attention_mask[..., None]

        if (labels is not None) and (decoder_input_ids is None and decoder_inputs_embeds is None):
            decoder_input_ids = shift_tokens_right(
                labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id
            )

        elif decoder_input_ids is None and decoder_inputs_embeds is None:
            # encode the audio prompt to obtain the decoder input ids
            audio_encoder_outputs = self.audio_encoder(
                input_values=input_values,
                padding_mask=padding_mask,
                **kwargs_audio_encoder,
            )
            audio_codes = audio_encoder_outputs.audio_codes
            frames, bsz, codebooks, seq_len = audio_codes.shape
            if frames != 1:
                raise ValueError(
                    f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                    "disabled by setting `chunk_length=None` in the audio encoder."
                )

            if self.config.decoder.audio_channels == 2 and audio_codes.shape[2] == self.decoder.num_codebooks // 2:
                # mono codes fed to a stereo decoder: duplicate the codebooks in an interleaved fashion
                audio_codes = audio_codes.repeat_interleave(2, dim=2)

            decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

        decoder_outputs = self.decoder(
            input_ids=decoder_input_ids,
            attention_mask=decoder_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=attention_mask,
            inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            use_cache=use_cache,
            past_key_values=past_key_values,
            return_dict=return_dict,
            labels=labels,
            **kwargs_decoder,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return Seq2SeqLMOutput(
            loss=decoder_outputs.loss,
            logits=decoder_outputs.logits,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        head_mask=None,
        decoder_attention_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        decoder_delay_pattern_mask=None,
        guidance_scale=None,
        **kwargs,
    ):
        if decoder_delay_pattern_mask is None:
            decoder_input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
                decoder_input_ids,
                self.generation_config.pad_token_id,
                max_length=self.generation_config.max_length,
            )

        # apply the delay pattern mask
        decoder_input_ids = self.decoder.apply_delay_pattern_mask(decoder_input_ids, decoder_delay_pattern_mask)

        if guidance_scale is not None and guidance_scale > 1:
            # for classifier free guidance we need to replicate the decoder args across the batch dim
            # (we'll split these before sampling)
            decoder_input_ids = decoder_input_ids.repeat((2, 1))
            if decoder_attention_mask is not None:
                decoder_attention_mask = decoder_attention_mask.repeat((2, 1))

        if past_key_values is not None:
            past_length = past_key_values[0][0].shape[2]

            # some generation methods already only pass the last input ID
            if decoder_input_ids.shape[1] > past_length:
                remove_prefix_length = past_length
            else:
                # default to the old behavior: keep only the final ID
                remove_prefix_length = decoder_input_ids.shape[1] - 1

            decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]

        return {
            "input_ids": None,  # encoder_outputs is defined, input_ids is not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,
        }
    def _prepare_decoder_input_ids_for_generation(
        self,
        batch_size: int,
        model_input_name: str,
        model_kwargs: Dict[str, torch.Tensor],
        decoder_start_token_id: int = None,
        bos_token_id: int = None,
        device: torch.device = None,
    ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]:
        """Prepares `decoder_input_ids` for generation with encoder-decoder models"""
        # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input
        # naming, we also allow the user to pass it under `input_ids` if the encoder does not use it as the main input.
        if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
            decoder_input_ids = model_kwargs.pop("decoder_input_ids")
        elif "input_ids" in model_kwargs and model_input_name != "input_ids":
            decoder_input_ids = model_kwargs.pop("input_ids")
        else:
            decoder_input_ids = None

        # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token; make sure they do
        decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
        if device is None:
            device = self.device
        decoder_input_ids_start = (
            torch.ones((batch_size * self.decoder.num_codebooks, 1), dtype=torch.long, device=device)
            * decoder_start_token_id
        )

        # no user input -> use decoder_start_token_id as decoder_input_ids
        if decoder_input_ids is None:
            decoder_input_ids = decoder_input_ids_start

        # user input that doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
        # decoder_attention_mask if provided)
        elif (decoder_input_ids[..., 0] != decoder_start_token_id).all().item():
            decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1)
            if "decoder_attention_mask" in model_kwargs:
                decoder_attention_mask = model_kwargs["decoder_attention_mask"]
                decoder_attention_mask = torch.cat(
                    (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
                    dim=-1,
                )
                model_kwargs["decoder_attention_mask"] = decoder_attention_mask

        return decoder_input_ids, model_kwargs

    def _prepare_text_encoder_kwargs_for_generation(
        self,
        inputs_tensor: torch.Tensor,
        model_kwargs,
        model_input_name: Optional[str],
        generation_config: GenerationConfig,
    ) -> Dict[str, Any]:
        # 1. get the text encoder
        encoder = self.get_text_encoder()
        # Compatibility with Accelerate big model inference: the encoder has to output on the same device as the inputs
        if hasattr(encoder, "_hf_hook"):
            encoder._hf_hook.io_same_device = True

        # 2. prepare the encoder args and kwargs from the model kwargs
        irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
        encoder_kwargs = {
            argument: value
            for argument, value in model_kwargs.items()
            if not any(argument.startswith(p) for p in irrelevant_prefix)
        }
        encoder_signature = set(inspect.signature(encoder.forward).parameters)
        encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
        if not encoder_accepts_wildcard:
            encoder_kwargs = {
                argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
            }

        encoder_kwargs["output_attentions"] = generation_config.output_attentions
        encoder_kwargs["output_hidden_states"] = generation_config.output_hidden_states
        guidance_scale = generation_config.guidance_scale

        # 3. make sure the encoder returns a `ModelOutput`
        model_input_name = model_input_name if model_input_name is not None else self.text_encoder.main_input_name
        encoder_kwargs["return_dict"] = True
        encoder_kwargs[model_input_name] = inputs_tensor
        last_hidden_state = encoder(**encoder_kwargs).last_hidden_state

        # for classifier free guidance we need to add a 'null' input to our encoder hidden states
        if guidance_scale is not None and guidance_scale > 1:
            last_hidden_state = torch.concatenate([last_hidden_state, torch.zeros_like(last_hidden_state)], dim=0)
            if "attention_mask" in model_kwargs:
                model_kwargs["attention_mask"] = torch.concatenate(
                    [model_kwargs["attention_mask"], torch.zeros_like(model_kwargs["attention_mask"])], dim=0
                )

        model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=last_hidden_state)

        return model_kwargs

    def _prepare_audio_encoder_kwargs_for_generation(
        self, input_values, model_kwargs, model_input_name: Optional[str] = None
    ):
        # 1. get the audio encoder
        encoder = self.get_audio_encoder()
        # Compatibility with Accelerate big model inference: the encoder has to output on the same device as the inputs
        if hasattr(encoder, "_hf_hook"):
            encoder._hf_hook.io_same_device = True

        # 2. prepare the encoder args and kwargs from the model kwargs
        irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
        encoder_kwargs = {
            argument: value
            for argument, value in model_kwargs.items()
            if not any(argument.startswith(p) for p in irrelevant_prefix)
        }
        encoder_signature = set(inspect.signature(encoder.forward).parameters)
        encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
        if not encoder_accepts_wildcard:
            encoder_kwargs = {
                argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
            }

        # 3. make sure the encoder returns a `ModelOutput`
        model_input_name = model_input_name if model_input_name is not None else self.audio_encoder.main_input_name
        encoder_kwargs["return_dict"] = True

        if self.decoder.config.audio_channels == 1:
            encoder_kwargs[model_input_name] = input_values
            audio_encoder_outputs = encoder.encode(**encoder_kwargs)
            audio_codes = audio_encoder_outputs.audio_codes
            audio_scales = audio_encoder_outputs.audio_scales

            frames, bsz, codebooks, seq_len = audio_codes.shape

        else:
            if input_values.shape[1] != 2:
                raise ValueError(
                    f"Expected stereo audio (2-channels) but example has {input_values.shape[1]} channel."
                )

            # encode the left and right channels separately, then interleave the resulting codebooks
            encoder_kwargs[model_input_name] = input_values[:, :1, :]
            audio_encoder_outputs_left = encoder.encode(**encoder_kwargs)
            audio_codes_left = audio_encoder_outputs_left.audio_codes
            audio_scales_left = audio_encoder_outputs_left.audio_scales

            encoder_kwargs[model_input_name] = input_values[:, 1:, :]
            audio_encoder_outputs_right = encoder.encode(**encoder_kwargs)
            audio_codes_right = audio_encoder_outputs_right.audio_codes
            audio_scales_right = audio_encoder_outputs_right.audio_scales

            frames, bsz, codebooks, seq_len = audio_codes_left.shape
            # copy alternating left/right channel codes into a stereo codebook
            audio_codes = audio_codes_left.new_ones((frames, bsz, 2 * codebooks, seq_len))

            audio_codes[:, :, ::2, :] = audio_codes_left
            audio_codes[:, :, 1::2, :] = audio_codes_right

            if audio_scales_left != [None] or audio_scales_right != [None]:
                audio_scales = torch.stack([audio_scales_left, audio_scales_right], dim=1)
            else:
                audio_scales = [None] * bsz

        if frames != 1:
            raise ValueError(
                f"Expected 1 frame in the audio code outputs, got {frames} frames. Ensure chunking is "
                "disabled by setting `chunk_length=None` in the audio encoder."
            )

        decoder_input_ids = audio_codes[0, ...].reshape(bsz * self.decoder.num_codebooks, seq_len)

        model_kwargs["decoder_input_ids"] = decoder_input_ids
        model_kwargs["audio_scales"] = audio_scales
        return model_kwargs

    def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
        return shift_tokens_right(labels, self.config.decoder.pad_token_id, self.config.decoder.decoder_start_token_id)

    def resize_token_embeddings(self, *args, **kwargs):
        raise NotImplementedError(
            "Resizing the embedding layers via the EncoderDecoderModel directly is not supported. Please use the"
            " respective methods of the wrapped objects (model.encoder.resize_token_embeddings(...) or"
            " model.decoder.resize_token_embeddings(...))"
        )

    def freeze_audio_encoder(self):
        """
        Freeze the audio encoder weights.
        """
        for param in self.audio_encoder.parameters():
            param.requires_grad = False
        self.audio_encoder._requires_grad = False

    def freeze_text_encoder(self):
        """
        Freeze the text encoder weights.
        """
        for param in self.text_encoder.parameters():
            param.requires_grad = False
        self.text_encoder._requires_grad = False

    def _maybe_initialize_input_ids_for_generation(
        self,
        inputs: Optional[torch.Tensor] = None,
        bos_token_id: Optional[int] = None,
        model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.LongTensor:
        """Initializes input ids for generation, if necessary."""
        if inputs is not None:
            return inputs

        encoder_outputs = model_kwargs.get("encoder_outputs")
        if encoder_outputs is not None:
            # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
            shape = encoder_outputs[0].size()[:-1]
            return torch.ones(shape, dtype=torch.long, device=self.device) * -100

        if bos_token_id is None:
            raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")

        # If there is some tensor in `model_kwargs`, we can infer the batch size from it
        batch_size = 1
        for value in model_kwargs.values():
            if isinstance(value, torch.Tensor):
                batch_size = value.shape[0]
                break
        return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id

    def _get_decoder_start_token_id(
        self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None
    ) -> int:
        decoder_start_token_id = (
            decoder_start_token_id
            if decoder_start_token_id is not None
            else self.generation_config.decoder_start_token_id
        )
        bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id

        if decoder_start_token_id is not None:
            return decoder_start_token_id
        elif bos_token_id is not None:
            return bos_token_id
        raise ValueError(
            "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
        )

    @torch.no_grad()
    def generate(
        self,
        inputs: Optional[torch.Tensor] = None,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        synced_gpus: Optional[bool] = None,
        streamer: Optional["BaseStreamer"] = None,
        **kwargs,
    ):
        """
        Generates sequences of audio codes for the composite model and decodes them to waveforms with the audio
        encoder. Takes the same arguments, and returns the same kinds of outputs, as
        [`MusicgenForCausalLM.generate`], except that the returned sequences are audio values.
        """
        # 1. Handle `generation_config` and kwargs that might update it, and validate the resulting objects
        if generation_config is None:
            generation_config = self.generation_config

        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # all unused kwargs must be model kwargs
        generation_config.validate()
        self._validate_model_kwargs(model_kwargs.copy())

        if model_kwargs.get("encoder_outputs") is not None and type(model_kwargs["encoder_outputs"]) is tuple:
            # wrap the unconditional outputs as a BaseModelOutput for compatibility with the rest of generate
            model_kwargs["encoder_outputs"] = BaseModelOutput(last_hidden_state=model_kwargs["encoder_outputs"][0])

        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

        requires_attention_mask = "encoder_outputs" not in model_kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None

        # 2. Set generation parameters if not already defined
        inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
            inputs, generation_config.bos_token_id, model_kwargs
        )
        batch_size = inputs_tensor.shape[0]
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=inputs_tensor.device)

        # 3. Define other model kwargs
        model_kwargs["use_cache"] = generation_config.use_cache
        model_kwargs["guidance_scale"] = generation_config.guidance_scale

        if model_kwargs.get("attention_mask", None) is None and requires_attention_mask:
            model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
                inputs_tensor, generation_config._pad_token_tensor, generation_config._eos_token_tensor
            )

        if "encoder_outputs" not in model_kwargs:
            # encode with the text encoder
            model_kwargs = self._prepare_text_encoder_kwargs_for_generation(
                inputs_tensor, model_kwargs, model_input_name, generation_config
            )

        if "decoder_input_ids" not in model_kwargs and "input_values" in model_kwargs:
            # encode the audio prompt with the audio encoder
            model_kwargs = self._prepare_audio_encoder_kwargs_for_generation(
                model_kwargs["input_values"], model_kwargs
            )

        # 4. Prepare `input_ids` which will be used for auto-regressive generation
        input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
            batch_size=batch_size,
            model_input_name=model_input_name,
            model_kwargs=model_kwargs,
            decoder_start_token_id=generation_config._decoder_start_token_tensor,
            bos_token_id=generation_config._bos_token_tensor,
            device=inputs_tensor.device,
        )

        # 5. Prepare `max_length` depending on the other stopping criteria
        input_ids_length = input_ids.shape[-1]
        has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
        has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
        generation_config = self._prepare_generated_length(
            generation_config=generation_config,
            has_default_max_length=has_default_max_length,
            has_default_min_length=has_default_min_length,
            model_input_name=model_input_name,
            inputs_tensor=inputs_tensor,
            input_ids_length=input_ids_length,
        )

        # 6. Build the delay pattern mask for offsetting each codebook prediction by 1
        input_ids, decoder_delay_pattern_mask = self.decoder.build_delay_pattern_mask(
            input_ids,
            pad_token_id=generation_config._decoder_start_token_tensor,
            max_length=generation_config.max_length,
        )
        # stash the delay mask so that it is re-applied at each generation step
        model_kwargs["decoder_delay_pattern_mask"] = decoder_delay_pattern_mask

        # input_ids are ready to be placed on the streamer (if used)
        if streamer is not None:
            streamer.put(input_ids.cpu())

        # 7. Determine the generation mode (classifier-free guidance adds a logits processor)
        generation_mode = generation_config.get_generation_mode()
        if generation_config.guidance_scale is not None and generation_config.guidance_scale > 1:
            logits_processor.append(ClassifierFreeGuidanceLogitsProcessor(generation_config.guidance_scale))
            generation_config.guidance_scale = None

        # 8. Prepare the logits processors and stopping criteria
        logits_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_length,
            encoder_input_ids=inputs_tensor,
            prefix_allowed_tokens_fn=None,
            logits_processor=logits_processor,
            device=input_ids.device,
        )
        stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        # 9. Run greedy search or sampling (beam methods are not supported for MusicGen)
        if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
            input_ids, model_kwargs = self._expand_inputs_for_generation(
                input_ids=input_ids,
                expand_size=generation_config.num_return_sequences,
                is_encoder_decoder=self.config.is_encoder_decoder,
                **model_kwargs,
            )
            outputs = self._sample(
                input_ids,
                logits_processor=logits_processor,
                stopping_criteria=stopping_criteria,
                generation_config=generation_config,
                synced_gpus=synced_gpus,
                streamer=streamer,
                **model_kwargs,
            )
        else:
            raise ValueError(
                "Got incompatible mode for generation, should be one of greedy or sampling. "
                "Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
            )

        if generation_config.return_dict_in_generate:
            output_ids = outputs.sequences
        else:
            output_ids = outputs

        # apply the pattern mask to the final ids, then revert the delay pattern by filtering the pad token ids
        output_ids = self.decoder.apply_delay_pattern_mask(output_ids, model_kwargs["decoder_delay_pattern_mask"])
        output_ids = output_ids[output_ids != generation_config._pad_token_tensor].reshape(
            batch_size, self.decoder.num_codebooks, -1
        )

        # append the frame dimension back to the audio codes
        output_ids = output_ids[None, ...]

        audio_scales = model_kwargs.get("audio_scales")
        if audio_scales is None:
            audio_scales = [None] * batch_size

        if self.decoder.config.audio_channels == 1:
            output_values = self.audio_encoder.decode(output_ids, audio_scales=audio_scales).audio_values
        else:
            # decode the interleaved left/right codebooks separately and stack the channels
            codec_outputs_left = self.audio_encoder.decode(output_ids[:, :, ::2, :], audio_scales=audio_scales)
            output_values_left = codec_outputs_left.audio_values

            codec_outputs_right = self.audio_encoder.decode(output_ids[:, :, 1::2, :], audio_scales=audio_scales)
            output_values_right = codec_outputs_right.audio_values

            output_values = torch.cat([output_values_left, output_values_right], dim=1)

        if generation_config.return_dict_in_generate:
            outputs.sequences = output_values
            return outputs
        return output_values
    def get_unconditional_inputs(self, num_samples=1):
        """
        Helper function to get null inputs for unconditional generation, enabling the model to be used without the
        feature extractor or tokenizer.

        Args:
            num_samples (int, *optional*):
                Number of audio samples to unconditionally generate.

        Example:
        ```python
        >>> from transformers import MusicgenForConditionalGeneration

        >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")

        >>> # get the unconditional (or 'null') inputs for the model
        >>> unconditional_inputs = model.get_unconditional_inputs(num_samples=1)
        >>> audio_samples = model.generate(**unconditional_inputs, max_new_tokens=256)
        ```"""
        last_hidden_state = torch.zeros(
            (num_samples, 1, self.config.text_encoder.hidden_size), device=self.device, dtype=self.dtype
        )

        attention_mask = torch.zeros((num_samples, 1), device=self.device, dtype=torch.long)

        return MusicgenUnconditionalInput(
            encoder_outputs=(last_hidden_state,),
            attention_mask=attention_mask,
            guidance_scale=self.generation_config.guidance_scale,
        )
NNNNNNNNNNrR   )NNr  )r(   )>r2   r3   r4   r)   r  r  rl  r  r  r  r   r   rv  rT   r  r  r  r  rq  r:  r  r  classmethodr  strr9  r   MUSICGEN_INPUTS_DOCSTRINGr#   r   r  r6   r9   
BoolTensorr7   r	   r   r
   r   r  r   r   r   r]   rX  r   r   rr  r  r  r  r  r  r  r   rS  r   r   r   r  r  r   r   s   @r=   r  r    sz        "L)!O&*#!N ,0263715[ [([ /[  0	[
 -.[ [ [ [ [ [z; ; ; " " "! ! !' ' '  8 8 84 4 4B B B ] ] ] ] [](  ;?;?59	Bk Bk47Bk 69Bk 03	Bk 
Bk Bk Bk [BkH +*+DEE?YYY 155948378<=A>B;?59=A-1$(,0/3&*!O
 O
E,-O
 !!12O
 u01	O

 u/0O
 $E$45O
 !))9 :O
 "%(9":;O
 uU%678O
   12O
  ((9:O
 )*O
 D>O
 $D>O
 'tnO
  d^!O
$ 
uo%	&%O
 O
 O
 ZY FEO
h #!#'8
 8
 8
 8
~ '+ #-/ -/-/ -/ 3,-	-/
 !$-/ -/ -/ 
uc5<&7!88	9-/ -/ -/ -/^/|/ #3-	/
 ,/ 
c3h/ / / /d MQJ J<DSMJ J J JXnEL n n n n
 
 
2 2 21 1 1 *.&*:>	` `&` sm` tC$567	`
 
	` ` ` `: Y]
 
&+CcN&;
RU
	
 
 
 
$ U]__ *.8<:><@&*-1t! t!&t! $$45t! ##67	t!
 $$89t! d^t! >*t! t! t! _t!l 
  
  
  
  
  
  
  
r<   r  )Sr5   r  r   rj   rV  dataclassesr   typingr   r   r   r   r   r	   r
   r6   torch.nnra   r   activationsr   
generationr   r   r   r   r   r   modeling_attn_mask_utilsr   r   r   r   modeling_outputsr   r   r   r   r   modeling_utilsr   utilsr   r   r    r!   r"   r#   auto.configuration_autor%   r  r'   configuration_musicgenr)   r*   modeling_flash_attention_utilsr+   generation.streamersr,   
get_loggerr2   r   r  _CHECKPOINT_FOR_DOCr.   r   r   rL   ModulerN   r   r   r   r   r   r  MUSICGEN_START_DOCSTRINGr  rg  r  ri  rv  r  r;   r<   r=   <module>r     s         ! ! ! ! ! ! I I I I I I I I I I I I I I I I I I        % % % % % % ! ! ! ! ! !                                        . - - - - -                1 0 0 0 0 0 * * * * * * I I I I I I I I  KJJJJJJ 5444444		H	%	%"/  ! ! ! ! ! ! ! !$%, c [^    (*L *L *L *L *LBI *L *L *L\[B [B [B [B [B	 [B [B [B~|9 |9 |9 |9 |9/ |9 |9 |9~x1 x1 x1 x1 x1- x1 x1 x1x !0  x x x x x29 x x xv? ? ? ? ?o ? ? ?2 (d LK% !\S
 S
 S
 S
 S
- S
 S
 S
l d @
 @
 @
 @
 @
+ @
 @
	 @
F G ~ ~ ~ ~ ~1? ~ ~	 ~B M 
}
 }
 }
 }
 }
 }
 }
 
}
 }
 }
r<   