
    g                    6   d Z ddlZddlmZmZmZ ddlZddlZddl	Zddlm
Z
 ddlmZ ddlmZ ddlmZmZmZmZ dd	lmZ dd
lmZ ddlmZmZmZmZmZmZ ddlm Z  ddl!m"Z"m#Z#m$Z$m%Z%m&Z&m'Z' ddl(m)Z) ddl*m+Z+  e$            rddl,m-Z-  e&j.        e/          Z0dZ1dZ2dZ3dHde4de4de5dej6        fdZ7dej6        de4de4fdZ8	 	 dIdee4e4f         de5d e4d!eej9                 d"e4dej:        fd#Z; G d$ d%e
j<                  Z= G d& d'e
j>                  Z? G d( d)e?          Z@ G d* d+e?          ZAe?e@eAd,ZB G d- d.e
j>                  ZC G d/ d0e
j>                  ZD G d1 d2e           ZEd3ZFd4ZGd5ZH G d6 d7eE          ZI G d8 d9eE          ZJ e"d:eF           G d; d<eE                      ZK e"d=eF           G d> d?e+eE                      ZL G d@ dAeE          ZM e"dBeF           G dC dDeEe                      ZN e"dEeH           G dF dGeE                      ZOdS )JzPyTorch Whisper model.    N)OptionalTupleUnion)nn)CrossEntropyLoss   )ACT2FN)CacheDynamicCacheEncoderDecoderCacheStaticCache)GenerationMixin)AttentionMaskConverter)BaseModelOutput)BaseModelOutputWithPastAndCrossAttentions!CausalLMOutputWithCrossAttentionsSeq2SeqLMOutputSeq2SeqModelOutputSequenceClassifierOutput)PreTrainedModel)add_start_docstrings%add_start_docstrings_to_model_forwardis_flash_attn_2_available#is_flash_attn_greater_or_equal_2_10loggingreplace_return_docstrings   )WhisperConfig)WhisperGenerationMixin)_flash_attention_forwardr   zopenai/whisper-tiny'  lengthchannelsmax_timescalereturnc                    |dz  dk    rt          d| d          t          j        |          |dz  dz
  z  }t          j        | t          j        |dz            z            }t          j        |                               dd          |                    dd          z  }t          j        |                                |	                                gd          S )z*Returns sinusoids for positional embedding   r   zVNumber of channels has to be divisible by 2 for sinusoidal positional embeddings, got z
 channels.r   dim)

ValueErrormathlogtorchexparangeviewcatsincos)r"   r#   r$   log_timescale_incrementinv_timescalesscaled_times         h/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/models/whisper/modeling_whisper.py	sinusoidsr9   ?   s    !|qyemyyy
 
 	
 #h}55Q9JKY 77%,xST}:U:UUVVN,v&&++B22^5H5HB5O5OOK9koo''):):;CCCC    	input_idspad_token_iddecoder_start_token_idc                     |                      | j                  }| ddddf                                         |ddddf<   ||dddf<   |t          d          |                    |dk    |           |S )z1
    Shift input ids one token to the right.
    Nr(   r   r   z1self.model.config.pad_token_id has to be defined.i)	new_zerosshapecloner+   masked_fill_)r;   r<   r=   shifted_input_idss       r8   shift_tokens_rightrD   L   s     "++IO<<(CRC06688aaae4aaadLMMM""#4#<lKKKr:   r@   	mask_probmask_lengthattention_mask	min_masksc                 @   | \  }dk     rt          d          k    rt          d d d          t          j                            d                                          fd}|9|                    d                                                                          nfd	t          |          D             }t          j	        |ft          
          }g }	 |          }
|
dk    r|S |D ]} ||          }t          j                            t          j        |dz
  z
            |d          }t          |          dk    rdz
  }n|d         }t          j        |t          j        |
|z
  t          j        
          |z  g          }|	                    |           t          j        |	          }	t          j        |	dddddf         ||
f          }	|	                    ||
z            }	t          j                  ddddf         }t          j        |||
f                              ||
z            }|	|z   }	|	                                dz
  k    rdz
  |	|	dz
  k    <   t          j        ||	dd           |S )af  
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
               the first element is the batch size and the second element is the length of the axis to span.
        mask_prob:  The percentage of the whole axis (between 0 and 1) which will be masked. The number of
                    independently generated mask spans of length `mask_length` is computed by
                    `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
                    actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
                        each batch dimension.
    r   z&`mask_length` has to be bigger than 0.zO`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: z and `sequence_length`: `c                     t          | z  z  z             }t          |          }|z  k    rz  }| dz
  z
  |k     rt          | dz
  z
  d          }|S )z;Given input length, compute how many spans should be maskedr   r   )intmax)input_lengthnum_masked_spanepsilonrF   rE   rH   sequence_lengths     r8   compute_num_masked_spanz6_compute_mask_indices.<locals>.compute_num_masked_span   s~    i,6DwNOOoy99 [(?::-<O ;?+o==!,+/"BAFFOr:   Nr(   c                     g | ]}S  rT   ).0_rQ   s     r8   
<listcomp>z)_compute_mask_indices.<locals>.<listcomp>   s    999!o999r:   )dtyper   F)replace)r+   nprandomranditemsumdetachtolistrangezerosboolchoicer0   lenconcatenateonesint32appendarraybroadcast_toreshaperM   put_along_axis)r@   rE   rF   rG   rH   
batch_sizerR   input_lengthsspec_aug_maskspec_aug_mask_idxsmax_num_masked_spanrN   rO   spec_aug_mask_idxdummy_mask_idxoffsetsrP   rQ   s    `` `           @@r8   _compute_mask_indicesrv   ]   sP   0 #(JQABBB_$$:^i : :'6: : :
 
 	
 innQ$$&&G        $ % 	2%%''..0009999uZ'8'8999  Hj/:$GGGM11/BBa% 5 511,?? I,,IlkAo677RW - 
 
  !!Q&& -q0NN.q1NN(;o(MUWU] ^ ^ ^ao op
 
 	!!"34444"455 111aaa:&5H+(V  ,33J@SVa@abb i$$T4]3Gog
4G'UVV^^'+5 G ,g5 /A"555GVYZGZ-!0CCD m%7B???r:   c                   B     e Zd Zddededee         f fdZd	dZ xZS )
WhisperPositionalEmbeddingNnum_positionsembedding_dimpadding_idxc                 L    t                                          ||           d S N)super__init__)selfry   rz   r{   	__class__s       r8   r   z#WhisperPositionalEmbedding.__init__   s#    66666r:   r   c                 Z    || j         |||j        d         z            S | j         |         S Nr   )weightr@   )r   r;   past_key_values_lengthposition_idss       r8   forwardz"WhisperPositionalEmbedding.forward   s8    ;58NQZQ`abQc8ccdd;|,,r:   r}   r   N)__name__
__module____qualname__rL   r   r   r   __classcell__r   s   @r8   rx   rx      sp        7 7c 7# 7HUXM 7 7 7 7 7 7- - - - - - - -r:   rx   c                       e Zd ZdZ	 	 	 	 	 	 ddededed	ed
ededee         dee         f fdZ	de
j        dedefdZ	 	 	 	 	 	 dde
j        dee
j                 dee         dee
j                 dee
j                 dedee
j                 dee
j        ee
j                 eee
j                          f         fdZ xZS )WhisperAttentionz=Multi-headed attention from 'Attention Is All You Need' paper        FTN	embed_dim	num_headsdropout
is_decoderbias	is_causal	layer_idxconfigc	                 p   t                                                       || _        || _        || _        ||z  | _        || _        | j        |z  | j        k    rt          d| j         d| d          | j        dz  | _        || _	        || _
        |*|r(t                              d| j        j         d           || _        t!          j        ||d          | _        t!          j        |||          | _        t!          j        |||          | _        t!          j        |||          | _        d S )	Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      zInstantiating a decoder z without passing `layer_idx` is not recommended and will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` when creating this class.Fr   )r~   r   r   r   r   head_dimr   r+   scalingr   r   loggerwarning_oncer   r   r   r   Lineark_projv_projq_projout_proj)
r   r   r   r   r   r   r   r   r   r   s
            r8   r   zWhisperAttention.__init__   sW    	""!Y.MI%$.883dn 3 3%.3 3 3   }d*$",4>+B , , ,  
 #i	95AAAi	94@@@i	94@@@	)YTBBBr:   tensorseq_lenbszc                     |                     ||| j        | j                                      dd                                          S )Nr   r'   )r1   r   r   	transpose
contiguous)r   r   r   r   s       r8   _shapezWhisperAttention._shape  s<    {{3GGQQRSUVWWbbdddr:   hidden_stateskey_value_statespast_key_valuerG   layer_head_maskoutput_attentionscache_positionr%   c                    |du}|                                 \  }	}
}|                     |                     |          | j        z  |
|	          }|?|j                            | j                  }|rd|j        | j        <   |j        }n|j        }||n|}|r)|r'|r%|j	        | j                 }|j
        | j                 }n~|                     |                     |          d|	          }|                     |                     |          d|	          }|(|s|nd}|                    ||| j        d|i          \  }}t          j        ||                    dd                    }|$|ddddddd|j        d         f         }||z   }t$          j                            |d          }|f|                                 | j        fk    r-t-          d	| j        f d
|                                            |                    dddd          |z  }t$          j                            || j        | j                  }t          j        ||          }|                                 |	| j        |
| j        fk    r5t-          d|	| j        |
| j        f d
|                                            |                    dd          }|                    |	|
| j                  }|                     |          }|||fS )#Input shape: Batch x Time x ChannelNTr(   r   r'   r   r)   z/Head mask for a single layer should be of size 	, but is r   ptraining `attn_output` should be of size )sizer   r   r   
is_updatedgetr   cross_attention_cacheself_attention_cache	key_cachevalue_cacher   r   updater.   matmulr   r@   r   
functionalsoftmaxr   r+   r1   r   r   r   rl   r   r   )r   r   r   r   rG   r   r   r   is_cross_attentionr   tgt_lenrV   query_statesr   current_states
key_statesvalue_statesattn_weightscausal_mask
attn_probsattn_outputs                        r8   r   zWhisperAttention.forward  sR    .T9',,..Wa {{4;;}#=#=#LgWZ[[%'266t~FFJ! E<@)$.9!/!E!/!D .>-I))} 	. 	Z 	'1$.AJ)5dnELLT[[%@%@"cJJJ;;t{{>'B'BBLLL)7I!St+9+@+@dn?OQ_>`, ,(
L |L*2F2Fq!2L2LMM%(AAAqqq2HJ4DR4H2H)HIK'+5L},,\r,BB&##%%$.)::: 1t~FW 1 1',,..1 1   +//2q!<<|KL]**<4<RVR_*``
l:|<<#t~w!NNN)CRVR_3` ) )$$&&) )  
 "++Aq11 "))#wGGmmK00L.88r:   )r   FTFNNNNNNFN)r   r   r   __doc__rL   floatrc   r   r   r   r.   Tensorr   r   
LongTensorr   r   r   r   s   @r8   r   r      s       GG  #'*.&C &C&C &C 	&C
 &C &C &C C=&C '&C &C &C &C &C &CReU\ eC ec e e e e 488<1526"'59M9 M9|M9 #5<0M9 !!45	M9
 !.M9 "%,/M9  M9 !!12M9 
u|Xel3XeEL>Q5RR	SM9 M9 M9 M9 M9 M9 M9 M9r:   r   c                       e Zd ZdZ fdZ	 	 	 	 	 	 ddej        deej                 dee         deej                 d	eej                 d
e	deej
                 deej        eej                 eeej                          f         fdZ xZS )WhisperFlashAttention2aJ  
    Whisper flash attention module. This module inherits from `WhisperAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    c                 b     t                      j        |i | t                       | _        d S r}   )r~   r   r   _flash_attn_uses_top_left_mask)r   argskwargsr   s      r8   r   zWhisperFlashAttention2.__init__f  s9    $)&)))
 3V2W2W.W+++r:   NFr   r   r   rG   r   r   r   r%   c           
         t          |t                    rt          d          |rt          d          |d u}|                                \  }	}
}t	          j        |                     |          |	|
| j        | j        f          }|?|j	        
                    | j                  }|rd|j	        | j        <   |j        }n|j        }||n|}|r)|r'|r%|j        | j                 }|j        | j                 }n~|                     |                     |          d|	          }|                     |                     |          d|	          }|(|s|nd }|                    ||| j        d|i          \  }}|                    dd          }|                    dd          }|}||d d d |j        d         f         }|j        }|t          j        k    rt	          j                    rt	          j                    }n3t5          | j        d	          r| j        j        }n| j        j        j        }t<                              d
| d           |                     |          }|                     |          }|                     |          }tC          |||||
| j"        r| j#        nd| j$        | j%                  }|                    |	|
d          }| &                    |          }|sd }|||fS )NzThe `static` cache implementation is not compatible with `attn_implementation='flash_attention_2'`. Use `attn_implementation='sdpa'` in the meantime, and open an issue at https://github.com/huggingface/transformerszCWhisperFlashAttention2 attention does not support output_attentionsTr(   r   r   r'   r   _pre_quantization_dtypezThe input hidden states seems to be silently casted in float32, this might be related to the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in .r   )r   r   use_top_left_mask)'
isinstancer   r+   r   r.   rl   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r@   rX   float32is_autocast_enabledget_autocast_gpu_dtypehasattrr   r   r   r   r   tor    r   r   r   r   r   )r   r   r   r   rG   r   r   r   r   r   r   rV   r   r   r   r   r   r   input_dtypetarget_dtyper   r   s                         r8   r   zWhisperFlashAttention2.forwardn  sR    nk22 	E  
  	dbccc .T9',,..Wa }T[[%?%?#wPTP^`d`mAnoo%'266t~FFJ! E<@)$.9!/!E!/!D .>-I))} 	. 	Z 	'1$.AJ)5dnELLT[[%@%@"cJJJ;;t{{>'B'BBLLL)7I!St+9+@+@dn?OQ_>`, ,(
L  ))!Q//
#--a33$%(,Bj.>r.B,B)BCK #(%-''(** 8$;==&?@@ 8#{B#{17$ $ $ $   (??<88L#|44J'??<88L.$(M:DLLsn"A	
 	
 	
 "))#w;;mmK00  	 LL.88r:   r   )r   r   r   r   r   r.   r   r   r   rc   r   r   r   r   r   s   @r8   r   r   ^  s        X X X X X 488<1526"'59h9 h9|h9 #5<0h9 !!45	h9
 !.h9 "%,/h9  h9 !!12h9 
u|Xel3XeEL>Q5RR	Sh9 h9 h9 h9 h9 h9 h9 h9r:   r   c                       e Zd Z	 	 	 	 	 	 ddej        deej                 dee         deej                 deej                 ded	eej                 d
e	ej        eej                 ee	ej                          f         f fdZ
 xZS )WhisperSdpaAttentionNFr   r   r   rG   r   r   r   r%   c           	      &   |s|Bt                               d           t                                          |||||||          S |du}|                                \  }	}
}|                     |                     |          |
|	          }|?|j                            | j	                  }|rd|j        | j	        <   |j
        }n|j        }||n|}|r)|r'|r%|j        | j	                 }|j        | j	                 }n~|                     |                     |          d|	          }|                     |                     |          d|	          }|(|s|nd}|                    ||| j	        d|i          \  }}|}||ddddddd|j        d         f         }| j        r
||
dk    rdnd	}t&          j        j                            ||||| j        r| j        nd
|          }|                                |	| j        |
| j        fk    r5t7          d|	| j        |
| j        f d|                                           |                    dd          }|                    |	|
| j                  }|                     |          }|d|fS )r   Na  WhisperModel is using WhisperSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.)r   r   rG   r   r   r   Tr(   r   r   r   Fr   )	attn_mask	dropout_pr   r   r   r'   ) r   r   r~   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r@   r   r.   r   r   scaled_dot_product_attentionr   r   r   r   r+   r   rl   r   r   )r   r   r   r   rG   r   r   r   r   r   r   rV   r   r   r   r   r   r   r   r   r   s                       r8   r   zWhisperSdpaAttention.forward  s     	 ;l   77??!1-- /"3- #    .T9',,..Wa {{4;;}#=#=wLL%'266t~FFJ! E<@)$.9!/!E!/!D .>-I))} 	. 	Z 	'1$.AJ)5dnELLT[[%@%@"cJJJ;;t{{>'B'BBLLL)7I!St+9+@+@dn?OQ_>`, ,(
L %%(AAAqqq2HJ4DR4H2H)HIK
 !N]{/BwQR{{DDX]	 h)FF!&*m<dll G 
 
 #t~w!NNN)CRVR_3` ) )$$&&) )  
 "++Aq11 "))#wGGmmK00D.00r:   r   )r   r   r   r.   r   r   r   rc   r   r   r   r   r   s   @r8   r   r     s         488<1526"'59^1 ^1|^1 #5<0^1 !!45	^1
 !.^1 "%,/^1  ^1 !!12^1 
u|Xel3XeEL>Q5RR	S^1 ^1 ^1 ^1 ^1 ^1 ^1 ^1 ^1 ^1r:   r   )eagerflash_attention_2sdpac                   l     e Zd Zdef fdZ	 d
dej        dej        dej        dedej        f
d	Z xZ	S )WhisperEncoderLayerr   c                 *   t                                                       |j        | _        t	          |j                 | j        |j        |j        |          | _        t          j
        | j                  | _        |j        | _        t          |j                 | _        |j        | _        t          j        | j        |j                  | _        t          j        |j        | j                  | _        t          j
        | j                  | _        d S )N)r   r   r   r   )r~   r   d_modelr   WHISPER_ATTENTION_CLASSES_attn_implementationencoder_attention_headsattention_dropout	self_attnr   	LayerNormself_attn_layer_normr   r	   activation_functionactivation_fnactivation_dropoutr   encoder_ffn_dimfc1fc2final_layer_normr   r   r   s     r8   r   zWhisperEncoderLayer.__init__D  s    263NOn4,	
 
 
 %'L$@$@!~#F$>?"(";9T^V-CDD9V3T^DD "T^ < <r:   Fr   rG   r   r   r%   c                 |   |}|                      |          }|                     ||||          \  }}}t          j                            || j        | j                  }||z   }|}|                     |          }|                     |                     |                    }t          j                            || j	        | j                  }| 
                    |          }t          j                            || j        | j                  }||z   }|j        t          j        k    rt          j        |                                          s&t          j        |                                          r9t          j        |j                  j        dz
  }t          j        || |          }|f}	|r|	|fz  }	|	S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )r   rG   r   r   r   i  )minrM   )r   r   r   r   r   r   r   r   r   r   r   rX   r.   float16isinfanyisnanfinforM   clamp)
r   r   rG   r   r   residualr   rV   clamp_valueoutputss
             r8   r   zWhisperEncoderLayer.forwardV  s   $ !11-@@)-')+/	 *8 *
 *
&|Q --mt|VZVc-dd =0 --m<<**488M+B+BCC--mt?Vaean-oo//--mt|VZVc-dd =0%-//K&&**,, 005M0J0J0N0N0P0P 0  +m&9::>EK!KK<[YYYM " 	'&Gr:   )F)
r   r   r   r   r   r.   r   rc   r   r   r   s   @r8   r   r   C  s        =} = = = = = =. #(0 0|0 0 	0
  0 
0 0 0 0 0 0 0 0r:   r   c                   .    e Zd Zddedef fdZ	 	 	 	 	 	 	 	 	 ddej        deej                 d	eej                 d
eej                 deej                 deej                 dee	         dee
         dee
         deej                 dej        fdZ xZS )WhisperDecoderLayerNr   r   c           	         t                                                       |j        | _        t	          |j                 | j        |j        |j        dd||          | _        |j	        | _	        t          |j                 | _        |j        | _        t          j        | j                  | _        t	          |j                 | j        |j        |j        d||          | _        t          j        | j                  | _        t          j        | j        |j                  | _        t          j        |j        | j                  | _        t          j        | j                  | _        d S )NT)r   r   r   r   r   r   r   )r   r   r   r   )r~   r   r   r   r   r   decoder_attention_headsr   r   r   r	   r   r   r   r   r   r   encoder_attnencoder_attn_layer_normr   decoder_ffn_dimr   r   r   )r   r   r   r   s      r8   r   zWhisperDecoderLayer.__init__  s0   263NOn4,
 
 
 ~#F$>?"(";$&L$@$@!5f6QRN*,
 
 
 (*|DN'C'C$9T^V-CDD9V3T^DD "T^ < <r:   FTr   rG   encoder_hidden_statesencoder_attention_maskr   cross_attn_layer_head_maskr   r   	use_cacher   r%   c                 <   |}|                      |          }|                     ||||||
          \  }}}t          j                            || j        | j                  }||z   }d}|k|}|                     |          }|                     ||||||          \  }}}t          j                            || j        | j                  }||z   }||f}|}|                     |          }| 	                    | 
                    |                    }t          j                            || j        | j                  }|                     |          }t          j                            || j        | j                  }||z   }|f}|r|||fz  }|	r||fz  }|S )a  
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        )r   r   rG   r   r   r   r   N)r   r   rG   r   r   r   )r   r   r   r   r   r   r  r  r   r   r   r   r   )r   r   rG   r  r  r   r  r   r   r  r   r	  self_attn_weightspresent_key_valuecross_attn_weightscross_attn_present_key_valuer  s                    r8   r   zWhisperDecoderLayer.forward  s   > !11-@@ ?Cnn'))+/) ?M ?
 ?
;(*; --mt|VZVc-dd =0 " ,$H 88GGMNRN_N_+!65 :-"3 O` O OKM-/K M11-4<Z^Zg1hhM$}4M "34P Q !--m<<**488M+B+BCC--mt?Vaean-oo//--mt|VZVc-dd =0 " 	?)+=>>G 	,)++Gr:   r}   )	NNNNNNFTN)r   r   r   r   rL   r   r.   r   r   r   rc   r   r   r   r   s   @r8   r  r    sD       = =} = = = = = = =D 268<9=26=A8<,1$(59R R|R !.R  (5	R
 !) 6R "%,/R %-U\$:R !!45R $D>R D>R !!12R 
R R R R R R R Rr:   r  c                   R    e Zd ZeZdZdZdZddgZdZ	dZ
dZdZd Zdej        fdZd	S )
WhisperPreTrainedModelmodelinput_featuresTr   r  c                    | j         j        }t          |t          j        t          j        f          rJ|j        j                            d|           |j	         |j	        j        
                                 d S d S t          |t          j                  rU|j        j                            d|           |j        +|j        j        |j                 
                                 d S d S t          |t                    r[t          j                    5  |j        j        }|                    t%          |j                    d d d            d S # 1 swxY w Y   d S d S )Nr   )meanstd)r   init_stdr   r   r   Conv1dr   datanormal_r   zero_	Embeddingr{   WhisperEncoderr.   no_gradembed_positionscopy_r9   r@   )r   moduler"  r+  s       r8   _init_weightsz$WhisperPreTrainedModel._init_weights	  s   k"fry")455 	IM&&CS&999{& &&((((( '&-- 	IM&&CS&999!-"6#56<<>>>>> .-// 	I I I"("8"?%%i1F&GHHHI I I I I I I I I I I I I I I I I I	I 	Is   .EEEro   c                     |dz
  dz  dz   }|S )zH
        Computes the output length of the convolutional layers
        r   r'   rT   )r   ro   s     r8    _get_feat_extract_output_lengthsz7WhisperPreTrainedModel._get_feat_extract_output_lengths  s     '*q014r:   N)r   r   r   r   config_classbase_model_prefixmain_input_namesupports_gradient_checkpointing_no_split_modules_supports_flash_attn_2_supports_sdpa_supports_cache_class_supports_static_cacher.  r.   r   r0  rT   r:   r8   r  r    s}         L&O&*#.0EF!N !I I Ie>N      r:   r  aL  
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`WhisperConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
aH  
    Args:
        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
            Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
            loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
            the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
            [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
            tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing *SpecAugment* data augmentation on padding token indices. Mask values selected in
            `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            Whisper uses the `decoder_start_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.

            If you want to change padding behavior, you should read
            [`modeling_whisper._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the BART
            paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
        past_key_values (`EncoderDecoderCache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
            Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are
            four sets of pre-computed hidden-states: key and values states in the self-attention blocks (2) and
            in the cross-attention blocks (2). The `past_key_values` are returned when `use_cache=True` is passed or
            when `config.use_cache=True`

            Two formats are allowed:
            - An [`~cache_utils.EncoderDecoderCache`] instance;
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
            `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
            representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
            input (see `past_key_values`). This is useful if you want more control over how to convert
            `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
            Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache
            in the correct position and to infer the complete sequence length.
aY  
    Args:
        input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, sequence_length)`):
            Float values mel features extracted from the raw speech waveform. Raw speech waveform can be obtained by
            loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
            the soundfile library (`pip install soundfile`). To prepare the array into `input_features`, the
            [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
            tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
c                   l     e Zd ZdZdef fdZd Zdej        fdZ	dej        fdZ
	 	 	 	 	 dd
Z xZS )r)  z
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`WhisperEncoderLayer`].

    Args:
        config: WhisperConfig
    r   c                    t                                                     j        | _        j        | _        j        }j        | _        j        | _        j	        | _	        j
        rt          j        |          nd| _        t          j        | j        |dd          | _        t          j        ||ddd          | _        t          j        | j	        |          | _        | j                            d           t          j        fdt-          j                  D                       | _        t          j        j                  | _        d| _        |                                  d S )	N      ?r   r   )kernel_sizepaddingr'   )r=  strider>  Fc                 .    g | ]}t                    S rT   )r   )rU   rV   r   s     r8   rW   z+WhisperEncoder.__init__.<locals>.<listcomp>  s"    $g$g$gQ%8%@%@$g$g$gr:   )r~   r   r   encoder_layerdrop	layerdropr   num_mel_binsr<   r{   max_source_positionsscale_embeddingr,   sqrtembed_scaler   r$  conv1conv2r(  r+  requires_grad_
ModuleListra   encoder_layerslayersr   
layer_normgradient_checkpointing	post_init)r   r   r   r   s    ` r8   r   zWhisperEncoder.__init__  sB      ~1N	"/!.$*$?!393IR49Y///sYt0)TUVVV
Yy)1VWXXX
!|D,EyQQ++E222m$g$g$g$g%PVPeJfJf$g$g$ghh,v~66&+#r:   c                 P    |                                  D ]	}d|_        
d| _        d S NF)
parametersrequires_grad_requires_grad)r   params     r8   _freeze_parametersz!WhisperEncoder._freeze_parameters  s4    __&& 	( 	(E"'E#r:   r%   c                     | j         S r}   rH  r   s    r8   get_input_embeddingsz#WhisperEncoder.get_input_embeddings  s
    zr:   valuec                     || _         d S r}   rY  r   r\  s     r8   set_input_embeddingsz#WhisperEncoder.set_input_embeddings  s    


r:   Nc           	         | j         j        | j        j        d         z  | j        j        d         z  }|j        d         |k    r$t          d| d|j        d          d| d          ||n| j         j        }||n| j         j        }||n| j         j	        }t          j                            |                     |                    }t          j                            |                     |                    }|                    ddd	          }| j        j        }	||	z   }
t          j                            |
| j        | j        
          }
|rdnd}|rdnd}|k|                                d         t'          | j                  k    s;J dt'          | j                   d|                                d          d            t+          | j                  D ]\  }}|r||
fz   }d}| j        r!t-          j        g           }|| j        k     rd}|rd}nX| j        r0| j        r)|                     |j        |
d|||         nd|          }n ||
d|||         nd|          }|d         }
|r||d	         fz   }|                     |
          }
|r||
fz   }|st;          d |
||fD                       S t=          |
||          S )aK  
        Args:
            input_features (`torch.LongTensor` of shape `(batch_size, feature_size, sequence_length)`):
                Float values of mel features extracted from the raw speech waveform. Raw speech waveform can be
                obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a
                `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into
                `input_features`, the [`AutoFeatureExtractor`] should be used for extracting the mel features, padding
                and conversion into a tensor of type `torch.FloatTensor`. See [`~WhisperFeatureExtractor.__call__`]
            attention_mask (`torch.Tensor`)`, *optional*):
                Whisper does not support masking of the `input_features`, this argument is preserved for compatibility,
                but it is not used. By default the silence in the input log mel spectrogram are ignored.
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        r   r(   z7Whisper expects the mel input features to be of length z, but found z-. Make sure to pad the input mel features to r   Nr'   r   r   rT   z&The head_mask should be specified for  layers, but it is for FT)NN)r   r   c              3      K   | ]}||V  	d S r}   rT   rU   vs     r8   	<genexpr>z)WhisperEncoder.forward.<locals>.<genexpr><  s(      eeqWXWdWdWdWdWdeer:   last_hidden_stater   
attentions)r   rD  rH  r?  rI  r@   r+   r   output_hidden_statesuse_return_dictr   r   gelupermuter+  r   r   r   r   re   rM  	enumerater.   r\   rB  rO  _gradient_checkpointing_func__call__rN  tupler   )r   r  rG   	head_maskr   ri  return_dictexpected_seq_lengthinputs_embeds	embed_posr   encoder_statesall_attentionsidxencoder_layerto_dropdropout_probabilitylayer_outputss                     r8   r   zWhisperEncoder.forward  s   F #k>ARSTAUUX\XbXijkXll#'::: IJ]  I  Ikyk  AC  lD  I  I  sF  I  I  I   2C1N--TXT_Tq$8$D  $+Jj 	 &1%<kk$+B]**4::n+E+EFF**4::m+D+DEE%--aA66(/	%	1--mt|VZVc-dd3=0:d  >>##A&DK    wDK8H8Hwwajaoaoaqaqrsatwww   #,DK"8"8  	F  	FC# C!/=2B!BG} #&+jnn#&77"G 1 ,. 4= $($E$E%.%+4+@3d)% %MM %2M%;D;P3VZ*;	% % %M !.a 0  F!/=3C2E!E66 	?+}.>>N 	fee]NN$Seeeeee+>Vd
 
 
 	
r:   )NNNNN)r   r   r   r   r   r   rW  r   Moduler[  r_  r   r   r   s   @r8   r)  r)    s         }      0$ $ $
bi    ")     !j
 j
 j
 j
 j
 j
 j
 j
r:   r)  c                        e Zd ZdZdZdef fdZd Zd Z	 	 	 	 	 	 	 	 	 	 	 	 	 ddZ	d	e
j        d
e
j        de
j        dedef
dZed	e
j        dedede
j        de
j        de
j        defd            Z xZS )WhisperDecoderz
    Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`WhisperDecoderLayer`]

    Args:
        config: WhisperConfig
    r;   r   c                    t                                                     j        | _        j        | _        j        | _        j        | _        j        | _        j	        rt          j        j                  nd| _        t          j        j        j        | j                  | _        t%          | j        j                  | _        t          j        fdt+          j                  D                       | _        j        dk    | _        j        dk    | _        t          j        j                  | _        d| _        |                                  d S )Nr<  c                 0    g | ]}t          |          S rT   )r  )rU   r   r   s     r8   rW   z+WhisperDecoder.__init__.<locals>.<listcomp>Y  s$    bbb	 33bbbr:   r   r   F)r~   r   r   decoder_layerdroprB  r<   r{   max_target_positionsrD  rE  r,   rF  r   rG  r   r(  
vocab_sizeembed_tokensrx   r+  rK  ra   decoder_layersrM  r   _use_flash_attention_2	_use_sdpar   rN  rO  rP  r   s    `r8   r   zWhisperDecoder.__init__L  s1      ~1!.$*$?!$*$?!8>8NW49V^444TWL):FNDL\]]9$:SU[UcddmbbbbU6K`EaEabbb
 
 '-&AEX&X#4>,v~66&+#r:   c                     | j         S r}   r  rZ  s    r8   r[  z#WhisperDecoder.get_input_embeddingsd  s      r:   c                     || _         d S r}   r  r^  s     r8   r_  z#WhisperDecoder.set_input_embeddingsg  s    !r:   Nc                 	   |
|
n| j         j        }
||n| j         j        }|	|	n| j         j        }	||n| j         j        }||t          d          |1|                                }|                    d|d                   }n.||                                dd         }nt          d          ||                     |          }d}d}|	s|t          |t                    r4t          |t                    sd}t          |t                                }nEt          |t                    s0d}t                              d           t          j        |          }d}|	|d         }n||                                }|%t#          j        |||d	         z   |j        
          }||                    d          }||                     |||          }n|                     |||          }||                    |j                  z   }t.          j                            || j        | j                  }|                     |||||j        nd|
          }| j        r%| j        r|	rt                              d           d}	|rdnd}|
rdnd}|
r|dnd}t=          ||gddg          D ]u\  }}|n|                                d         t?          | j                   k    s>J d| dt?          | j                    d|                                d          d            vtC          | j                   D ]\  }}|r||fz  }| j        r t#          j"        g           }|| j#        k     r4| j        r@| j        r9| $                    |j%        |||d|||         nd|||         ndd|
|	|          }n, |||||||         nd|||         nd|	r|nd|
|	|	  	        }|d         }|
r||d	         fz  }|||d         fz  }| &                    |          }|r||fz  }|	r|nd}|r|j        }|r|'                                }|stQ          d |||||fD                       S tS          |||||          S )ac  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`WhisperTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`EncoderDecoderCache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
                Pre-computed hidden-states that can be used to speed up auto-regressive (sequential) decoding. There are
                four sets of pre-computed hidden-states: key and values states in the self-attention blocks (2) and
                in the cross-attention blocks (2). The `past_key_values` are returned when `use_cache=True` is passed or
                when `config.use_cache=True`

                Two formats are allowed:
                - An [`~cache_utils.EncoderDecoderCache`] instance;
                - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
                `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        NzTYou cannot specify both decoder_input_ids and decoder_inputs_embeds at the same timer(   zEYou have to specify either decoder_input_ids or decoder_inputs_embedsFTzPassing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.43.0. You should pass an instance of `EncoderDecoderCache` instead, e.g. `past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`.r   r   device)r   r   r   z^`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`...rT   rq  cross_attn_head_maskzThe `z` should be specified for ra  r   )rG   r  r   r  r   r   r  r   r'   c              3      K   | ]}||V  	d S r}   rT   rc  s     r8   re  z)WhisperDecoder.forward.<locals>.<genexpr>L  s0        =  === r:   )rg  past_key_valuesr   rh  cross_attentions)*r   r   ri  r  rj  r+   r   r1   r  r   r
   r   r   r   r   from_legacy_cacheget_seq_lengthr.   r0   r  	unsqueezer+  r   r   r   r   r   _update_causal_maskr   rO  zipre   rM  rm  r\   rB  rn  ro  rN  to_legacy_cacherp  r   )r   r;   rG   r  rq  r  r  rt  r   r  r   ri  rr  r   input_shapereturn_legacy_cachereturn_self_attention_cacher   	positionsr   r   all_hidden_statesall_self_attnsall_cross_attentionsr   	mask_namerx  decoder_layerr{  r|  
next_caches                                  r8   r   zWhisperDecoder.forwardj  s)   b 2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B]  ]%>sttt"#..**K!r;r?;;II&',,..ss3KKdeee  --i88M#&+# 	Y3/511 
Y*_Vi:j:j 
Y.2+"5o|~~"V"V1DEE Y&*###`  
 #6"G"X"X!"%%3A%6""(%4%C%C%E%E"!"\&(>Q(OXeXl  N )33A66L  ,,2HWc -  II ,,6L[g -  I &	]5I(J(JJ--mt|VZVc-dd..4C4OO00UY
 
 & 	"4= 	" "##t   "	"6@BBD0:d&7h<Q<]rrdh %(4H(IKYoKp$q$q 	 	 Iy$ ~~''*s4;/?/?@@@/I / /T[AQAQ / /!((+/ / / A@@ #,DK"8"8 +	@ +	@C# 6!m%55!} &+jnn#&77* t}  $ A A!*!)&/&;IcNN1E1Q(--W[%"! ! !.!#.*?7@7LYs^^RV5I5U,S11[_6?#I??T&7'#1! ! ! *!,M  @=#3"55(4(]1-=,??(66 	2-!11(1;__t
& 	>(=J 	;(88::J 	  '5FXlm     
 9+&+%1
 
 
 	
r:   rG   input_tensorr   r  r   c           
         | j         j        dk    r
|d|v r|S d S ||                                nd}t          |t                    }| j         j        dk    r#|s!|st          j        |||| j                  rd S |j        |j	        }	}|j
        d         }
|r|                                }n/t          |t          j                  r|j
        d         n||
z   dz   }|                     ||
|||	||j
        d                   }| j         j        dk    rB|@|j	        j        d	k    r0|s.t          j        |          j        }t          j        ||          }|S )
Nr   r   r   r   )rt  r   is_trainingr   r(   rQ   target_lengthrX   r  r   rn   cuda)r   r   r  r   r   r   _ignore_causal_mask_sdpar   rX   r  r@   get_max_cache_shaper.   r   5_prepare_4d_causal_attention_mask_with_cache_positiontyper  r  _unmask_unattended)r   rG   r  r   r  r   past_seen_tokensusing_static_cacherX   r  rQ   r  r   	min_dtypes                 r8   r  z"WhisperDecoder._update_causal_maskZ  s    ;+/BBB)c^.C.C%%4
 @O?Z?99;;;`a'EE ;+v55>P5Yj5%>*'7 M	    t$*L,?v&,Q/ 	+??AAMM nel;;<$R((%7!;  PP+')#)!, Q 
 
 K,66*%*f44% 5 E**.I0CKQZ[[Kr:   rQ   r  rX   r  rn   c                    | |                                  dk    r| }n+t          j        |          j        }	t          j        ||f|	||          }|dk    rt          j        |d          }|t          j        ||          |                    dd          k    z  }|ddddddf                             |ddd          }| |	                                }| j
        d         }
|ddddddd|
f         | ddddddf         z   }|dk    }|ddddddd|
f                             ||	          |ddddddd|
f<   |S )	a  
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape
                `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache,
                to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to plcae the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
        N   )
fill_valuerX   r  r   )diagonalr  r(   r   )r*   r.   r  r  fulltriur0   rl   expandrA   r@   masked_fill)rG   rQ   r  rX   r  r   rn   r   r   r  rF   padding_masks               r8   r  zDWhisperDecoder._prepare_4d_causal_attention_mask_with_cache_position  s   D %.*<*<*>*>!*C*C(KKE**.I* -0Ye\b  K !###jqAAA5<fEEEH^H^_acdHeHeeeK%dD!!!QQQ&67>>z1bRTUUK))//11,226*111aaaL[L+@ANSTSTSTVZ\`bcbcbcScDdd+q05@AAAqqq,;,AV5W5c5c )6 6AAAqqq!!!\k\12 r:   NNNNNNNNNNNNN)r   r   r   r   r3  r   r   r[  r_  r   r.   r   r
   rc   r  staticmethodrL   rX   r  r  r   r   s   @r8   r  r  B  sj         "O}      0! ! !" " "
 "!!m
 m
 m
 m
`?? l? 	?
 ?  ? ? ? ?B 555 5 {	5
 5 5 5 5 5 \5 5 5 5 5r:   r  zUThe bare Whisper Model outputting raw hidden-states without any specific head on top.c            '           e Zd Zdef fdZd Zd Zd Zd Zd Z		 dd	e
j        d
ee
j                 fdZ ee           eee          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd	ee
j                 d
ee
j                 dee
j                 dee
j                 dee
j                 dee
j                 dee
j                 deeee
j                                   deeeee
j                 f                  deee
j                          deee
j                          dee         dee         dee         dee         dee
j                 deee
j                 ef         f"d                        Z xZS )WhisperModelr   c                     t                                          |           t          |          | _        t	          |          | _        |                                  d S r}   )r~   r   r)  encoderr  decoderrP  r   s     r8   r   zWhisperModel.__init__  sO       %f--%f--r:   c                     | j         j        S r}   r  r  rZ  s    r8   r[  z!WhisperModel.get_input_embeddings      |((r:   c                     || j         _        d S r}   r  r^  s     r8   r_  z!WhisperModel.set_input_embeddings      $)!!!r:   c                     | j         S r}   )r  rZ  s    r8   get_encoderzWhisperModel.get_encoder  
    |r:   c                     | j         S r}   r  rZ  s    r8   get_decoderzWhisperModel.get_decoder  r  r:   c                 8    | j                                          dS z
        Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
        not be updated during training.
        Nr  rW  rZ  s    r8   freeze_encoderzWhisperModel.freeze_encoder      
 	'')))))r:   Nr  rG   c                 ~   t          | j        dd          s|S |                                \  }}}| j        j        dk    r| j        rt          ||f| j        j        | j        j        || j        j                  }t          j	        ||j
        t          j                  }|dddf                             d|d          }d||<   | j        j        dk    re| j        r^t          ||f| j        j        | j        j        | j        j                  }t          j	        ||j
        t          j                  }d||<   |S )	z
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).
        apply_spec_augmentTr   )rE   rF   rG   rH   )r  rX   Nr(   )rE   rF   rH   )getattrr   r   mask_time_probr   rv   mask_time_lengthmask_time_min_masksr.   r   r  rc   r  mask_feature_probmask_feature_lengthmask_feature_min_masks)r   r  rG   rn   hidden_sizerQ   mask_time_indicesmask_feature_indicess           r8   _mask_input_featuresz!WhisperModel._mask_input_features  s[    t{$8$?? 	"!! 4B3F3F3H3H0
K;%))dm) 5_-+4 K8-+9! ! ! !&->~G\didn o o o 1!!!T' : A A"kSU V V01N,-;(1,,,#8[)+7 K;+<	$ $ $  $)<0D^Mbjojt#u#u#u 34N/0r:   output_typer1  decoder_input_idsdecoder_attention_maskrq  decoder_head_maskr  encoder_outputsr  decoder_inputs_embedsdecoder_position_idsr  r   ri  rr  r   r%   c                    ||n| j         j        }||n| j         j        }||n| j         j        }||n| j         j        }|2|                     ||          }|                     |||||          }ne|rct          |t                    sNt          |d         t          |          dk    r|d         ndt          |          dk    r|d         nd          }| 
                    |||d         |||	|
||||||          }|s||z   S t          |j        |j        |j        |j        |j        |j        |j        |j        	          S )
a{  
        Returns:

        Example:
         ```python
         >>> import torch
         >>> from transformers import AutoFeatureExtractor, WhisperModel
         >>> from datasets import load_dataset

         >>> model = WhisperModel.from_pretrained("openai/whisper-base")
         >>> feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-base")
         >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
         >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
         >>> input_features = inputs.input_features
         >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
         >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
         >>> list(last_hidden_state.shape)
         [1, 2, 512]
         ```N)rG   rq  r   ri  rr  r   r   r'   rf  )r;   rG   r  rq  r  r  rt  r   r  r   ri  rr  r   )rg  r  decoder_hidden_statesdecoder_attentionsr  encoder_last_hidden_stater  encoder_attentions)r   r   ri  r  rj  r  r  r   r   re   r  r   rg  r  r   rh  r  )r   r  rG   r  r  rq  r  r  r  r  r  r  r  r   ri  rr  r   decoder_outputss                     r8   r   zWhisperModel.forward   s   P 2C1N--TXT_Tq$8$D  $+Jj 	 "+!6IIDK<Q	%0%<kk$+B]"!66~Vd6eeN"ll#"3%9' +  OO  	O_!M!M 	-"1!"4474H4H14L4Loa00RV14_1E1E1I1I?1--t  O ,,'1"1!"4'!5+/-/!5#) ' 
 
   	5"_44!-?+;"1"?.9,=&5&G"1"?.9	
 	
 	
 		
r:   r}   )NNNNNNNNNNNNNNNN)r   r   r   r   r   r[  r_  r  r  r  r.   FloatTensorr   r   r  r   WHISPER_INPUTS_DOCSTRINGr   r   _CONFIG_FOR_DOCr   r   r   r   rc   r   r   r   s   @r8   r  r    s       
}      ) ) )* * *    * * * 6:) ))) !!12) ) ) )V +*+CDD+=O\\\ 7;598<=A,0487;EIZ^DHBF$(,0/3&*59#\
 \
 !23\
 !!12\
 $E$45	\

 !))9 :\
 EL)\
 $EL1\
 'u|4\
 "%e.?(@"AB\
 "%(;U5CT=U(U"VW\
  (e.?(@A\
 'uU-='>?\
 D>\
 $D>\
 'tn\
  d^!\
" !!12#\
$ 
uU\"$66	7%\
 \
 \
 ]\ ED\
 \
 \
 \
 \
r:   r  z^The Whisper Model with a language modeling head. Can be used for automatic speech recognition.c            )           e Zd ZdZdgZdef fdZd Zd Zd Z	d Z
d	ej        fd
Zd Z ee           eee          	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d!deej                 deej                 deej                 deej                 deej                 deej                 deej                 deeeej                                   deeeeej                 f                  deeej                          deeej                          deej                 dee         dee         dee         dee         deej                 d	eeej                 ef         f$d                        Z	 	 	 	 	 	 d"d Z xZS )#WhisperForConditionalGenerationr  proj_out.weightr   c                     t                                          |           t          |          | _        t	          j        |j        |j        d          | _        |j	        | _	        | 
                                 d S NFr   )r~   r   r  r  r   r   r   r  proj_outr  rP  r   s     r8   r   z(WhisperForConditionalGeneration.__init__  sj       !&))
	&.&2C%PPP$*$?! 	r:   c                 4    | j                                         S r}   )r  r  rZ  s    r8   r  z+WhisperForConditionalGeneration.get_encoder      z%%'''r:   c                 4    | j                                         S r}   )r  r  rZ  s    r8   r  z+WhisperForConditionalGeneration.get_decoder  r  r:   c                     | j         S r}   r  rZ  s    r8   get_output_embeddingsz5WhisperForConditionalGeneration.get_output_embeddings  
    }r:   c                     || _         d S r}   r  r   new_embeddingss     r8   set_output_embeddingsz5WhisperForConditionalGeneration.set_output_embeddings      &r:   r%   c                 4    | j                                         S r}   r  r[  rZ  s    r8   r[  z4WhisperForConditionalGeneration.get_input_embeddings      z..000r:   c                 B    | j         j                                         dS r  )r  r  rW  rZ  s    r8   r  z.WhisperForConditionalGeneration.freeze_encoder  s!    
 	
--/////r:   r  Nr  rG   r  r  rq  r  r  r  r  r  r  labelsr  r   ri  rr  r   c                    ||n| j         j        }|e|j        d         | j        k    r&t	          d|j        d          d| j         d          |'|
%t          || j         j        | j         j                  }|                     |||||||||	|
||||||          }| 	                    |d                   }d}|et                      }|                    |j                  } ||                    d| j         j                  |                    d                    }|s|f|dd         z   }||f|z   n|S t!          |||j        |j        |j        |j        |j        |j        |j        		  	        S )
a  
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss. Indices should either be in `[0, ..., config.vocab_size]`
            or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is
            only computed for the tokens with labels in `[0, ..., config.vocab_size]`. `sequence_length` should be smaller than or equal to `config.max_target_positions`.

        Returns:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
        >>> from datasets import load_dataset

        >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
        >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

        >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
        >>> input_features = inputs.input_features

        >>> generated_ids = model.generate(inputs=input_features)

        >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
        >>> transcription
        ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
        ```Nr   zLabels' sequence length z- cannot exceed the maximum allowed length of z tokens.)rG   r  r  r  rq  r  r  r  r  r  r  r   ri  rr  r   r   r(   )	losslogitsr  r  r  r  r  r  r  )r   rj  r@   r  r+   rD   r<   r=   r  r  r   r   r  r1   r  rl   r   r  r  r  r  r  r  r  )r   r  rG   r  r  rq  r  r  r  r  r  r  r  r  r   ri  rr  r   r  	lm_logitsr  loss_fctoutputs                          r8   r   z'WhisperForConditionalGeneration.forward  s   f &1%<kk$+B]|A!:::  Qv|A  Q  Qmq  nG  Q  Q  Q   !(-B-J$6DK4dk6X% %! **)/+#9/!5+"7!5/!5#)!  
 
$ MM'!*--	'))HYYy/00F8INN2t{/EFFWYHZHZ[[D 	F\GABBK/F)-)9TGf$$vE#3")"?&9$5&-&G")"?&9

 

 

 
	
r:   c           	          d }	|,|                     d          dz
                      d          }	d}
|t          |t                    r||d         n|                                }
n|d         d         j        d         }
|j        d         |
k    r|
}n|j        d         dz
  }|d d |d f         }|	.|	d d |d f         }	|	                    t          j                  }	|+t          j	        |
|
|j        d         z   |j
                  }n|r||j        d          d          }|                                }t          |t                    rt          |j        t                    st          |j        t                    rp|n|j        dk    rc|j        \  }}|                                                     |||j                                        | j        j        j        |j
        ||          }||||||	|d	S )
Nr(   r   r   )r  r'   )memory_formatr  r  )r  r  r  r  r  r  r   )cumsumr  r   r   r  r@   rA   r.   contiguous_formatr0   r  r   r   r   r   ndimr  r  r  r  r   rX   )r   r  r  r  r  rG   r  r   r   r  past_lengthremove_prefix_lengthrn   rQ   s                 r8   prepare_inputs_for_generationz=WhisperForConditionalGeneration.prepare_inputs_for_generation  sr     $!-$:$A$A"$E$E$I#P#PUV#P#W#W &/+>?? =3A3MnQ//SbSqSqSsSs-a039!< !&q)K77'2$$ (9'>q'AA'E$ 1!!!5I5J5J2J K#/';AAA?S?T?T<T'U$';'A'APUPg'A'h'h$!"\[+<+B1+EEN_Nf  NN  	K+->-DQ-G,G,I,IJN .88:: (;<<	 ??MM	 oC[QQ		 '2&+q00*;*A'J%)%5%5%7%7%m%m& /-BVVXXm*0(/-% &n & &"  /.!2"&<$8,
 
 	
r:   )NNNNNNNNNNNNNNNNN)NNNNNN) r   r   r   r2  _tied_weights_keysr   r   r  r  r  r   r   r}  r[  r  r   r  r   r   r  r   r.   r  r   r   r   r   r   rc   r   r  r   r   s   @r8   r  r    s       
  +,}      ( ( (( ( (  ' ' '1bi 1 1 1 10 0 0 +*+CDD?YYY 7;598<=A,0487;EIZ^DHBF-1$(,0/3&*59%f
 f
 !23f
 !!12f
 $E$45	f

 !))9 :f
 EL)f
 $EL1f
 'u|4f
 "%e.?(@"ABf
 "%(;U5CT=U(U"VWf
  (e.?(@Af
 'uU-='>?f
 )*f
 D>f
 $D>f
  'tn!f
" d^#f
$ !!12%f
& 
uU\"O3	4'f
 f
 f
 ZY EDf
V #P
 P
 P
 P
 P
 P
 P
 P
r:   r  c                   4     e Zd ZdZ fdZd Zd Zd Z xZS )WhisperDecoderWrapperz
    This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
    used in combination with the [`EncoderDecoderModel`] framework.
    c                     t                                          |           d|_        t          |          | _        d S rR  )r~   r   is_encoder_decoderr  r  r   s     r8   r   zWhisperDecoderWrapper.__init__k  s6       $)!%f--r:   c                     | j         j        S r}   r  rZ  s    r8   r[  z*WhisperDecoderWrapper.get_input_embeddingsp  r  r:   c                     || j         _        d S r}   r  r^  s     r8   r_  z*WhisperDecoderWrapper.set_input_embeddingss  r  r:   c                      | j         |i |S r}   r  )r   r   r   s      r8   r   zWhisperDecoderWrapper.forwardv  s    t|T,V,,,r:   )	r   r   r   r   r   r[  r_  r   r   r   s   @r8   r  r  e  so         
. . . . .
) ) )* * *- - - - - - -r:   r  zx
    Whisper decoder with a language modeling head on top (linear layer with weights tied to the input embeddings).
    c                        e Zd ZdgZdZ fdZd Zd Zdej	        fdZ
d Zd	 Zd
 Z eee          	 	 	 	 	 	 	 	 	 	 	 	 	 ddej        deej                 deeej                          deej                 deej                 deeeej                                   deej                 deej                 dee         dee         dee         dee         deej                 deeef         fd            Zed             Z xZS )WhisperForCausalLMr  r;   c                     t                                          |           d|_        t          |          | _        t          j        |j        |j        d          | _	        | 
                                 d S r  )r~   r   r  r  r  r   r   r  r  r  rP  r   s     r8   r   zWhisperForCausalLM.__init__  sh       $)!*622
	&"4f6GeTTT 	r:   c                     | j         S r}   r  rZ  s    r8   r  z(WhisperForCausalLM.get_output_embeddings  r  r:   c                     || _         d S r}   r  r  s     r8   r   z(WhisperForCausalLM.set_output_embeddings  r  r:   r%   c                 4    | j                                         S r}   r  rZ  s    r8   r[  z'WhisperForCausalLM.get_input_embeddings  r  r:   c                 :    | j                             |           d S r}   )r  r_  r^  s     r8   r_  z'WhisperForCausalLM.set_input_embeddings  s    
''.....r:   c                     || j         _        d S r}   r  r  )r   r  s     r8   set_decoderzWhisperForCausalLM.set_decoder  s    $
r:   c                     | j         j        S r}   r%  rZ  s    r8   r  zWhisperForCausalLM.get_decoder  s    z!!r:   r  NrG   r  rq  r  r  rt  r  r  r   ri  rr  r   c                    |
|
n| j         j        }
||n| j         j        }||n| j         j        }t	          |t
          t          t          f          r|d         }| j        	                    ||||||||	|
|||          }| 
                    |d                   }d}|e|                    |j                  }t                      } ||                    d| j         j                  |                    d                    }|s|f|dd         z   }||f|z   n|S t!          |||j        |j        |j        |j                  S )a  
        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids)
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
                [What are attention masks?](../glossary#attention-mask)
            encoder_outputs  (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                if the model is configured as a decoder.
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains
                pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
                blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If
                `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
                don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
                `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the cache
                in the correct position and to infer the complete sequence length.

        Returns:

        Example:

        ```python
        >>> from transformers import WhisperForCausalLM, WhisperForConditionalGeneration, WhisperProcessor
        >>> import torch
        >>> from datasets import load_dataset

        >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2")
        >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2")

        >>> assistant_model = WhisperForCausalLM.from_pretrained("distil-whisper/distil-large-v2")

        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> sample = ds[0]["audio"]
        >>> input_features = processor(
        ...     sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
        ... ).input_features

        >>> predicted_ids = model.generate(input_features, assistant_model=assistant_model)

        >>> # decode token ids to text
        >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
        >>> transcription
        ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'
        ```Nr   )r;   rG   r  rq  r  r  rt  r  r   ri  rr  r   r(   r   )r  r	  r  r   rh  r  )r   r   ri  rj  r   r   rp  listr  r  r  r   r  r   r1   r  r   r  r   rh  r  )r   r;   rG   r  rq  r  r  rt  r  r  r   ri  rr  r   r  r	  r  r  r  s                      r8   r   zWhisperForCausalLM.forward  s   J 2C1N--TXT_Tq$8$D  $+Jj 	 &1%<kk$+B] o'EFF 	1-a0O *$$)"1!5+'/!5#) % 
 
 wqz**YYv}--F'))H8FKKDK,BCCV[[QS__UUD 	DY,F'+'7D7V##VC0#3!/)$5
 
 
 	
r:   c                 T    d}| D ]!}|t          fd|D                       fz  }"|S )NrT   c              3   t   K   | ]2}|                     d                     |j                            V  3dS r   )index_selectr   r  )rU   
past_statebeam_idxs     r8   re  z4WhisperForCausalLM._reorder_cache.<locals>.<genexpr>9  sC      nnU_j--aZ=N1O1OPPnnnnnnr:   )rp  )r  r.  reordered_past
layer_pasts    `  r8   _reorder_cachez!WhisperForCausalLM._reorder_cache4  sQ    ) 	 	Jnnnncmnnnnn NN r:   r  )r   r   r   r  r3  r   r  r   r   r}  r[  r_  r&  r  r   r   r  r.   r   r   r   r   r  rc   r   r   r  r1  r   r   s   @r8   r  r  z  s5        ,,!O      ' ' '1bi 1 1 1 1/ / /% % %" " " +L[jkkk '+15>B,07;EI59-1$(,0/3&*59Q
 Q
#Q
 !.Q
 "%(9":;	Q

 EL)Q
 'u|4Q
 "%e.?(@"ABQ
   12Q
 )*Q
 D>Q
 $D>Q
 'tnQ
 d^Q
 !!12Q
 
u77	8Q
 Q
 Q
 lkQ
f   \    r:   r  z
    Whisper Encoder Model with a sequence classification head on top (a linear layer over the pooled output) for tasks
    like SUPERB Keyword Spotting.
    c                       e Zd Z fdZd Zdej        fdZdej        fdZ e	e
           eee          	 	 	 	 	 	 	 dd	eej                 d
eej                 deeeej                                   deej                 dee         dee         dee         deeej                 ef         fd                        Z xZS )WhisperForAudioClassificationc                    t                                          |           t          |          | _        |j        dz   }|j        r.t          j        t          j	        |          |z            | _
        t          j        |j        |j                  | _        t          j        |j        |j                  | _        |                                  d S r   )r~   r   r)  r  num_hidden_layersuse_weighted_layer_sumr   	Parameterr.   rg   layer_weightsr   r  classifier_proj_size	projector
num_labels
classifierrP  )r   r   
num_layersr   s      r8   r   z&WhisperForAudioClassification.__init__F  s       %f---1
( 	S!#ej.D.Dz.Q!R!RD6#5v7RSS)F$?ARSS 	r:   c                 8    | j                                          dS )z
        Calling this function will disable the gradient computation for the Whisper encoder so that its parameters will
        not be updated during training. Only the projection layers and classification head will be updated.
        Nr  rZ  s    r8   r  z,WhisperForAudioClassification.freeze_encoderS  r  r:   r%   c                 4    | j                                         S r}   )r  r[  rZ  s    r8   r[  z2WhisperForAudioClassification.get_input_embeddingsZ  s    |00222r:   r\  c                 :    | j                             |           d S r}   )r  r_  r^  s     r8   r_  z2WhisperForAudioClassification.set_input_embeddings]  s    ))%00000r:   r  Nr  rq  r  r  r   ri  rr  c                    ||n| j         j        }||n| j         j        }| j         j        rd}n|| j         j        }||n| j         j        }||                     |||||          }| j         j        rx|t                   }t          j        |d          }t          j
                            | j        d          }	||	                    ddd          z                      d          }n|d         }|                     |          }|                    d          }
|                     |
          }d}|et%                      }|                    |j                  } ||                    d| j         j                  |                    d                    }|s|f|dd         z   }||f|z   n|S t-          |||j        |j                  S )	a  
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Example:

        ```python
        >>> import torch
        >>> from transformers import AutoFeatureExtractor, WhisperForAudioClassification
        >>> from datasets import load_dataset

        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")
        >>> model = WhisperForAudioClassification.from_pretrained("sanchit-gandhi/whisper-medium-fleurs-lang-id")

        >>> ds = load_dataset("google/fleurs", "all", split="validation", streaming=True)
        >>> sample = next(iter(ds))

        >>> inputs = feature_extractor(
        ...     sample["audio"]["array"], sampling_rate=sample["audio"]["sampling_rate"], return_tensors="pt"
        ... )
        >>> input_features = inputs.input_features

        >>> with torch.no_grad():
        ...     logits = model(input_features).logits

        >>> predicted_class_ids = torch.argmax(logits).item()
        >>> predicted_label = model.config.id2label[predicted_class_ids]
        >>> predicted_label
        'Afrikaans'
        ```NTr  r   r)   r(   r   )r  r	  r   rh  )r   r   ri  r6  rj  r  _HIDDEN_STATES_START_POSITIONr.   stackr   r   r   r8  r1   r^   r:  r!  r<  r   r   r  r;  r   r   rh  )r   r  rq  r  r  r   ri  rr  r   norm_weightspooled_outputr	  r  r  r  s                  r8   r   z%WhisperForAudioClassification.forward`  s   ^ 2C1N--TXT_Tq$8$D  $+Jj 	 ;- 	D#'  !)#';#C %0%<kk$+B]""ll#"3%9' +  O ;- 	/+,IJM!K1===M=001C0LLL*\->->r1a-H-HHMMRSMTTMM+A.M}55%**q*11//'))HYYv}--F8FKKDK,BCCV[[QS__UUD 	FY!44F)-)9TGf$$vE')7&1	
 
 
 	
r:   )NNNNNNN)r   r   r   r   r  r   r}  r[  r_  r    WHISPER_ENCODER_INPUTS_DOCSTRINGr   r   r  r   r.   r   r   r   r  rc   r   r   r   r   s   @r8   r3  r3  >  s           * * *3bi 3 3 3 31") 1 1 1 1 +*+KLL+CRabbb 6:,0EI-1,0/3&*_
 _
 !12_
 EL)_
 "%e.?(@"AB	_

 )*_
 $D>_
 'tn_
 d^_
 
uU\"$<<	=_
 _
 _
 cb ML_
 _
 _
 _
 _
r:   r3  )r!   )Nr   )Pr   r,   typingr   r   r   numpyrZ   r.   torch.utils.checkpointr   torch.nnr   activationsr	   cache_utilsr
   r   r   r   
generationr   modeling_attn_mask_utilsr   modeling_outputsr   r   r   r   r   r   modeling_utilsr   utilsr   r   r   r   r   r   configuration_whisperr   generation_whisperr   modeling_flash_attention_utilsr    
get_loggerr   r   rB  r  _CHECKPOINT_FOR_DOCrL   r   r   r9   rD   r   ndarrayrv   r(  rx   r}  r   r   r   r   r   r  r  WHISPER_START_DOCSTRINGr  rF  r)  r  r  r  r  r  r3  rT   r:   r8   <module>rY     sn      ) ) ) ) ) ) ) ) ) )                % % % % % % ! ! ! ! ! ! P P P P P P P P P P P P ) ) ) ) ) ) > > > > > >                . - - - - -                1 0 0 0 0 0 6 6 6 6 6 6  KJJJJJJ 
	H	%	% ! !+ 	D 	Dc 	DS 	D 	D5< 	D 	D 	D 	D%, c [^    * 26t tc?tt t U-.	t
 t Zt t t tn- - - - - - - -|9 |9 |9 |9 |9ry |9 |9 |9~x9 x9 x9 x9 x9- x9 x9 x9v_1 _1 _1 _1 _1+ _1 _1 _1F /   C C C C C") C C CLr r r r r") r r rj         _      F  Z x$  8V
 V
 V
 V
 V
+ V
 V
 V
rP P P P P+ P P Pf [ e
 e
 e
 e
 e
) e
 e
	 e
P d ]
 ]
 ]
 ]
 ]
&<>T ]
 ]
	 ]
@- - - - -2 - - -*  	 { { { { {/ { { {|  % |
 |
 |
 |
 |
$: |
 |
 |
 |
 |
r:   