
    g'                        d dl Z d dlmZ d dlZd dlZddlmZmZm	Z	m
Z
 ddlmZmZ  e            rddlmZ  e
j        e          Zded	ed
ej        fdZ e ed                     G d de                      ZdS )    N)Union   )add_end_docstringsis_torch_availableis_torchaudio_availablelogging   )Pipelinebuild_pipeline_init_args),MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMESbpayloadsampling_ratereturnc                 ~   | }d}d}dddd|d|d|d	d
ddg}	 t          j        |t           j        t           j                  }n# t          $ r t	          d          w xY w|                    |           }|d         }t          j        |t          j                  }	|	j	        d         dk    rt	          d          |	S )z?
    Helper function to read an audio file through ffmpeg.
    1f32leffmpegz-izpipe:0z-acz-arz-fz-hide_bannerz	-loglevelquietzpipe:1)stdinstdoutzFffmpeg was not found but is required to load audio files from filenamer   zMalformed soundfile)

subprocessPopenPIPEFileNotFoundError
ValueErrorcommunicatenp
frombufferfloat32shape)
r   r   aracformat_for_conversionffmpeg_commandffmpeg_processoutput_stream	out_bytesaudios
             g/var/www/html/ai-engine/env/lib/python3.11/site-packages/transformers/pipelines/audio_classification.pyffmpeg_readr*      s     	B	B#

N c#).
XbXghhh c c cabbbc"..x88Ma IM)RZ00E{1~.///Ls   +A AT)has_feature_extractorc                   j     e Zd ZdZ fdZdeej        ee	f         f fdZ
ddZd Zd ZddZ xZS )AudioClassificationPipelinea  
    Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
    raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
    formats.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)


    This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"audio-classification"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
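
    Raw waveforms can also be passed directly. A minimal sketch of the dict input form (the zero-filled
    `waveform` and its 8 kHz rate are hypothetical; audio at a rate other than the model's is resampled
    through torchaudio):

    ```python
    >>> import numpy as np

    >>> waveform = np.zeros(8000, dtype=np.float32)  # hypothetical one-second silent clip at 8 kHz
    >>> classifier({"sampling_rate": 8000, "raw": waveform}, top_k=2)  # doctest: +SKIP
    ```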
    """

    def __init__(self, *args, **kwargs):
        # Default, might be overridden by the model.config.
        kwargs["top_k"] = 5
        super().__init__(*args, **kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)

    def __call__(self, inputs: Union[np.ndarray, bytes, str], **kwargs):
        """
        Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
        information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs can be either:
                    - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
                      to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes` it is supposed to be the content of an audio file and is interpreted by *ffmpeg* in the
                      same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                        Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int,
                      "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
                      `"array"` is used to denote the raw audio waveform.
            top_k (`int`, *optional*, defaults to None):
                The number of top labels that will be returned by the pipeline. If the provided number is `None` or
                higher than the number of labels available in the model configuration, it will default to the number of
                labels.
            function_to_apply (`str`, *optional*, defaults to "softmax"):
                The function to apply to the model output. By default, the pipeline will apply the softmax function to
                the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
                built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
                post-processing.

        Return:
            A list of `dict` with the following keys:

            - **label** (`str`) -- The label predicted.
            - **score** (`float`) -- The corresponding probability.
        """
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
        # No preprocess/forward parameters on this pipeline right now.
        postprocess_params = {}
        if top_k is not None:
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels
            postprocess_params["top_k"] = top_k
        if function_to_apply is not None:
            if function_to_apply not in ["softmax", "sigmoid", "none"]:
                raise ValueError(
                    f"Invalid value for `function_to_apply`: {function_to_apply}. "
                    "Valid options are ['softmax', 'sigmoid', 'none']"
                )
            postprocess_params["function_to_apply"] = function_to_apply
        else:
            postprocess_params["function_to_apply"] = "softmax"
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                inputs = requests.get(inputs).content
            else:
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        if isinstance(inputs, dict):
            # Accepting `"array"` which is the key defined in `datasets` for better integration
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                import torch

                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
                ).numpy()

        if not isinstance(inputs, np.ndarray):
            raise TypeError("We expect a numpy ndarray as input")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

        processed = self.feature_extractor(
            inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        return processed

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
        if function_to_apply == "softmax":
            probs = model_outputs.logits[0].softmax(-1)
        elif function_to_apply == "sigmoid":
            probs = model_outputs.logits[0].sigmoid()
        else:
            probs = model_outputs.logits[0]
        scores, ids = probs.topk(top_k)

        scores = scores.tolist()
        ids = ids.tolist()

        labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]

        return labels
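

# Illustrative smoke test (a sketch, not upstream behavior): exercises the
# preprocess -> _forward -> postprocess path documented above. Assumes the
# `superb/wav2vec2-base-superb-ks` checkpoint from the class docstring can be
# downloaded and that torchaudio is installed for the resampling branch.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks")

    # Dict input: raw audio at an arbitrary rate; preprocess() resamples it to the model's rate.
    waveform = np.zeros(8000, dtype=np.float32)  # hypothetical one-second silent clip at 8 kHz
    print(classifier({"sampling_rate": 8000, "raw": waveform}, top_k=2))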