
"""PyTorch ViViT model."""

import math
from typing import Optional, Set, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_int,
)
from .configuration_vivit import VivitConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/vivit-b-16x2-kinetics400"
_CONFIG_FOR_DOC = "VivitConfig"


class VivitTubeletEmbeddings(nn.Module):
    """
    Construct Vivit Tubelet embeddings.

    This module turns a batch of videos of shape (batch_size, num_frames, num_channels, height, width) into a tensor of
    shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size[0]) * (height // tubelet_size[1]) *
    (width // tubelet_size[2]).
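
    As a worked example (assuming the default google/vivit-b-16x2-kinetics400 configuration: 32 frames of size
    224x224 and tubelet_size = [2, 16, 16]), seq_len = (32 // 2) * (224 // 16) * (224 // 16) = 16 * 14 * 14 = 3136.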
    """

    def __init__(self, config):
        super().__init__()
        self.num_frames = config.num_frames
        self.image_size = config.image_size
        self.patch_size = config.tubelet_size
        self.num_patches = (
            (self.num_frames // self.patch_size[0])
            * (self.image_size // self.patch_size[1])
            * (self.image_size // self.patch_size[2])
        )
        self.embed_dim = config.hidden_size

        self.projection = nn.Conv3d(
            config.num_channels, config.hidden_size, kernel_size=config.tubelet_size, stride=config.tubelet_size
        )

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
            )

        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)

        x = self.projection(pixel_values)
        # flatten the spatio-temporal grid and move the channel dimension last: (batch_size, seq_len, hidden_size)
        x = x.flatten(2).transpose(1, 2)
        return x


class VivitEmbeddings(nn.Module):
    """
    Vivit Embeddings.

    Creates embeddings from a video using VivitTubeletEmbeddings, adds CLS token and positional embeddings.
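    The output sequence therefore has length `num_patches + 1`: one prepended [CLS] token plus one embedding per
    tubelet.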
    """

    def __init__(self, config):
        super().__init__()

        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
        self.patch_embeddings = VivitTubeletEmbeddings(config)

        self.position_embeddings = nn.Parameter(
            torch.zeros(1, self.patch_embeddings.num_patches + 1, config.hidden_size)
        )
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.tubelet_size[1:]
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
        images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing so the exported model works with dynamic input sizes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size[0]
        new_width = width // self.patch_size[1]

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(self, pixel_values, interpolate_pos_encoding: bool = False):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        cls_tokens = self.cls_token.tile([batch_size, 1, 1])
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings


class VivitSelfAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # dot product between "query" and "key" gives the raw attention scores
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # normalize the attention scores to probabilities
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        attention_probs = self.dropout(attention_probs)

        # mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VivitSdpaSelfAttention(VivitSelfAttention):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__(config)
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        if output_attentions or head_mask is not None:
            logger.warning_once(
                "VivitSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not "
                "support `output_attentions=True` or `head_mask`. Falling back to the manual attention "
                "implementation, but specifying the manual implementation will be required from Transformers "
                'version v5.0.0 onwards. This warning can be removed using the argument '
                '`attn_implementation="eager"` when loading the model.'
            )
            return super().forward(hidden_states, head_mask, output_attentions)

        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            self.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=None,
        )

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        return context_layer, None


class VivitSelfOutput(nn.Module):
    """
    The residual connection is defined in VivitLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitAttention(nn.Module):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__()
        self.attention = VivitSelfAttention(config)
        self.output = VivitSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class VivitSdpaAttention(VivitAttention):
    def __init__(self, config: VivitConfig) -> None:
        super().__init__(config)
        self.attention = VivitSdpaSelfAttention(config)


class VivitIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VivitOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


VIVIT_ATTENTION_CLASSES = {
    "eager": VivitAttention,
    "sdpa": VivitSdpaAttention,
}


class VivitLayer(nn.Module):
    """This corresponds to the EncoderBlock class in the scenic/vivit implementation."""

    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VIVIT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = VivitIntermediate(config)
        self.output = VivitOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, head_mask=None, output_attentions=False):
        self_attention_outputs = self.attention(
            # in Vivit, layernorm is applied before self-attention
            self.layernorm_before(hidden_states),
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        # add self attentions if we output attention weights
        outputs = self_attention_outputs[1:]

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in Vivit, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class VivitEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VivitLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states,
        head_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class VivitPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # we "pool" the model by simply taking the hidden state corresponding to the first token
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class VivitPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VivitConfig
    base_model_prefix = "vivit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = []
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Parameter):
            module.data.normal_(mean=0.0, std=self.config.initializer_range)


VIVIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VivitConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIVIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`VivitImageProcessor`]. See
            [`VivitImageProcessor.preprocess`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*, defaults to `False`):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
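
    As an illustration (assuming the default google/vivit-b-16x2-kinetics400 configuration), `pixel_values` is
    expected to have shape `(batch_size, 32, 3, 224, 224)`.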
"""


@add_start_docstrings(
    "The bare ViViT Transformer model outputting raw hidden-states without any specific head on top.",
    VIVIT_START_DOCSTRING,
)
class VivitModel(VivitPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        self.embeddings = VivitEmbeddings(config)
        self.encoder = VivitEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = VivitPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.

        Args:
            heads_to_prune:
                dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], BaseModelOutputWithPooling]:
        r"""
        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import VivitImageProcessor, VivitModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitModel.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 3137, 768]
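        >>> # 3137 = 16 * 14 * 14 tubelet tokens + 1 [CLS] token (assuming the default 32-frame, 224x224, 2x16x16-tubelet configuration)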
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    ViViT Transformer model with a video classification head on top (a linear layer on top of the final hidden state of the
[CLS] token) e.g. for Kinetics-400.

    <Tip>

        Note that it's possible to fine-tune ViViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.
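
        As an illustrative usage sketch (not an official recipe): preprocess the frames at the higher resolution and
        call `outputs = model(**inputs, interpolate_pos_encoding=True)`.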

    </Tip>
    """,
    VIVIT_START_DOCSTRING,
)
class VivitForVideoClassification(VivitPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vivit = VivitModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIVIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.FloatTensor], ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np
        >>> import torch

        >>> from transformers import VivitImageProcessor, VivitForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 32 frames
        >>> indices = sample_frame_indices(clip_len=32, frame_sample_rate=4, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container=container, indices=indices)

        >>> image_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
        >>> model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        LABEL_116
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.vivit(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.classifier(sequence_output[:, 0, :])

        loss = None
        if labels is not None:
            if self.num_labels == 1:
                # we are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )