
from dataclasses import dataclass
from functools import partial
from typing import Callable

import torch
import torchaudio
from torchaudio.models import conv_tasnet_base, hdemucs_high


@dataclass
class SourceSeparationBundle:
    """Dataclass that bundles components for performing source separation.

    Example
        >>> import torchaudio
        >>> from torchaudio.pipelines import CONVTASNET_BASE_LIBRI2MIX
        >>> import torch
        >>>
        >>> # Build the separation model.
        >>> model = CONVTASNET_BASE_LIBRI2MIX.get_model()
        >>> 100%|███████████████████████████████|19.1M/19.1M [00:04<00:00, 4.93MB/s]
        >>>
        >>> # Instantiate the test set of Libri2Mix dataset.
        >>> dataset = torchaudio.datasets.LibriMix("/home/datasets/", subset="test")
        >>>
        >>> # Apply source separation on mixture audio.
        >>> for i, data in enumerate(dataset):
        >>>     sample_rate, mixture, clean_sources = data
        >>>     # Make sure the shape of input suits the model requirement.
        >>>     mixture = mixture.reshape(1, 1, -1)
        >>>     estimated_sources = model(mixture)
        >>>     score = si_snr_pit(estimated_sources, clean_sources) # for demonstration
        >>>     print(f"Si-SNR score is : {score}.")
        >>>     break
        >>> Si-SNR score is : 16.24.
        >>>
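
    Note
        ``si_snr_pit`` in the example above is not part of this module; it stands for a
        scale-invariant SNR metric evaluated with permutation-invariant matching between
        estimated and clean sources. A minimal sketch of the underlying Si-SNR
        computation (a hypothetical helper, not a torchaudio API) could look like:

        >>> def si_snr(estimate, reference, eps=1e-8):
        >>>     # Remove DC offset, then project the estimate onto the reference.
        >>>     estimate = estimate - estimate.mean(dim=-1, keepdim=True)
        >>>     reference = reference - reference.mean(dim=-1, keepdim=True)
        >>>     scale = (estimate * reference).sum(-1, keepdim=True) / (reference.pow(2).sum(-1, keepdim=True) + eps)
        >>>     projection = scale * reference
        >>>     noise = estimate - projection
        >>>     # Ratio of target energy to residual-noise energy, in dB.
        >>>     return 10 * torch.log10(projection.pow(2).sum(-1) / (noise.pow(2).sum(-1) + eps))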
    _model_path_model_factory_func_sample_ratereturnc                     | j         S )zSSample rate of the audio that the model is trained on.

        :type: int
        )r   )selfs    j/root/voice-cloning/.venv/lib/python3.11/site-packages/torchaudio/pipelines/_source_separation_pipeline.pysample_ratez"SourceSeparationBundle.sample_rate,   s           c                     |                                  }t          j                            | j                  }t          j        |          }|                    |           |                                 |S )z3Construct the model and load the pretrained weight.)	r   
torchaudioutils_download_assetr
   torchloadload_state_dicteval)r   modelpath
state_dicts       r   	get_modelz SourceSeparationBundle.get_model4   sa    ((**//0@AAZ%%
j)))

r   N)__name__
__module____qualname____doc__str__annotations__r   r   nnModuleintpropertyr   r    r   r   r	   r	      s          6 !"eho"56666!S ! ! ! X!58?      r   r	   z$models/conv_tasnet_base_libri2mix.pt   )num_sourcesi@  )r
   r   r   a  Pre-trained Source Separation pipeline with *ConvTasNet*
:cite:`Luo_2019` trained on *Libri2Mix dataset* :cite:`cosentino2020librimix`.

The source separation model is constructed by :func:`~torchaudio.models.conv_tasnet_base`
and is trained using the training script ``lightning_train.py``
`here <https://github.com/pytorch/audio/tree/release/0.12/examples/source_separation/>`__
with default arguments.

Please refer to :class:`SourceSeparationBundle` for usage instructions.
zmodels/hdemucs_high_trained.pt)drumsbassothervocals)sourcesiD  a	  Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on both training and test sets of
MUSDB-HQ :cite:`MUSDB18HQ` and an additional 150 extra songs from an internal database
that was specifically produced for Meta.

The model is constructed by :func:`~torchaudio.models.hdemucs_high`.

Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.

Please refer to :class:`SourceSeparationBundle` for usage instructions.
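
Example
    >>> # A minimal usage sketch; ``mixture.wav`` below is a hypothetical 44.1 kHz
    >>> # stereo file. The model takes input shaped ``(batch, channels, time)`` and
    >>> # returns ``(batch, sources, channels, time)``, with sources ordered as
    >>> # ``["drums", "bass", "other", "vocals"]``.
    >>> import torchaudio
    >>> from torchaudio.pipelines import HDEMUCS_HIGH_MUSDB_PLUS
    >>> model = HDEMUCS_HIGH_MUSDB_PLUS.get_model()
    >>> waveform, sample_rate = torchaudio.load("mixture.wav")
    >>> assert sample_rate == HDEMUCS_HIGH_MUSDB_PLUS.sample_rate
    >>> drums, bass, other, vocals = model(waveform.unsqueeze(0))[0]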
"""

HDEMUCS_HIGH_MUSDB = SourceSeparationBundle(
    _model_path="models/hdemucs_high_musdbhq_only.pt",
    _model_factory_func=partial(hdemucs_high, sources=["drums", "bass", "other", "vocals"]),
    _sample_rate=44100,
)
HDEMUCS_HIGH_MUSDB.__doc__ = """Pre-trained music source separation pipeline with
*Hybrid Demucs* :cite:`defossez2021hybrid` trained on the training set of MUSDB-HQ :cite:`MUSDB18HQ`.

The model is constructed by :func:`~torchaudio.models.hdemucs_high`.
Training was performed in the original HDemucs repository `here <https://github.com/facebookresearch/demucs/>`__.

Please refer to :class:`SourceSeparationBundle` for usage instructions.
"""