r"""
This package introduces support for the current :ref:`accelerator<accelerators>` in Python.
"""

from typing import Optional

from typing_extensions import deprecated

import torch

from ._utils import _device_t, _get_device_index
from .memory import (
    empty_cache,
    max_memory_allocated,
    max_memory_reserved,
    memory_allocated,
    memory_reserved,
    memory_stats,
    reset_accumulated_memory_stats,
    reset_peak_memory_stats,
)


__all__ = [
    "current_accelerator",
    "current_device_idx",  # deprecated
    "current_device_index",
    "current_stream",
    "device_count",
    "device_index",
    "empty_cache",
    "is_available",
    "max_memory_allocated",
    "max_memory_reserved",
    "memory_allocated",
    "memory_reserved",
    "memory_stats",
    "reset_accumulated_memory_stats",
    "reset_peak_memory_stats",
    "set_device_idx",  # deprecated
    "set_device_index",
    "set_stream",
    "synchronize",
]


def device_count() -> int:
    r"""Return the number of devices available for the current :ref:`accelerator<accelerators>`.

    Returns:
        int: the number of devices available for the current :ref:`accelerator<accelerators>`.
            If no accelerator is available, returns 0.

    .. note:: This API delegates to the device-specific version of `device_count`.
        On CUDA, this API will NOT poison fork if NVML discovery succeeds.
        Otherwise, it will. For more details, see :ref:`multiprocessing-poison-fork-note`.
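
    A minimal usage sketch (the printed count depends on the machine; it is 0 when no
    accelerator device is visible)::

        >>> print(torch.accelerator.device_count())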
    """
    acc = current_accelerator()
    if acc is None:
        return 0
    mod = torch.get_device_module(acc)
    return mod.device_count()


def is_available() -> bool:
    r"""Check if the current accelerator is available at runtime: it was built, all the
    required drivers are available and at least one device is visible.
    See :ref:`accelerator<accelerators>` for details.

    Returns:
        bool: A boolean indicating if there is an available :ref:`accelerator<accelerators>`.

    .. note:: This API delegates to the device-specific version of `is_available`.
        On CUDA, when the environment variable ``PYTORCH_NVML_BASED_CUDA_CHECK=1`` is set,
        this function will NOT poison fork. Otherwise, it will. For more details, see
        :ref:`multiprocessing-poison-fork-note`.

    Example::

        >>> assert torch.accelerator.is_available(), "No available accelerators detected."
    """
    acc = current_accelerator()
    if acc is None:
        return False
    mod = torch.get_device_module(acc)
    return mod.is_available()


def current_accelerator(check_available: bool = False) -> Optional[torch.device]:
    r"""Return the device of the accelerator available at compilation time.
    If no accelerator was available at compilation time, returns None.
    See :ref:`accelerator<accelerators>` for details.

    Args:
        check_available (bool, optional): if True, also performs a runtime availability
            check via :func:`torch.accelerator.is_available` on top of the compile-time
            check.
            Default: ``False``

    Returns:
        Optional[torch.device]: the current accelerator as a :class:`torch.device`, or ``None``.

    .. note:: The index of the returned :class:`torch.device` will be ``None``. Please use
        :func:`torch.accelerator.current_device_index` to query the current device index.
        This API does NOT poison fork. For more details, see :ref:`multiprocessing-poison-fork-note`.

    Example::

        >>> # xdoctest:
        >>> # If an accelerator is available, send the model to it
        >>> model = torch.nn.Linear(2, 2)
        >>> if (current_device := torch.accelerator.current_accelerator(check_available=True)) is not None:
        >>>     model.to(current_device)
    """
    if (acc := torch._C._accelerator_getAccelerator()) is not None:
        if not check_available or is_available():
            return acc
    return None


def current_device_index() -> int:
    r"""Return the index of the currently selected device for the current :ref:`accelerator<accelerators>`.

    Returns:
        int: the index of the currently selected device.
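
    A minimal usage sketch, assuming an accelerator with at least one device is available::

        >>> idx = torch.accelerator.current_device_index()  # e.g. 0 right after startup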
    """
    return torch._C._accelerator_getDeviceIndex()


current_device_idx = deprecated(
    "Use `current_device_index` instead.",
    category=FutureWarning,
)(current_device_index)
current_device_idx.__doc__ = r"""
    (Deprecated) Return the index of the currently selected device for the current :ref:`accelerator<accelerators>`.

    Returns:
        int: the index of the currently selected device.

    .. warning::

        :func:`torch.accelerator.current_device_idx` is deprecated in favor of :func:`torch.accelerator.current_device_index`
        and will be removed in a future PyTorch release.
    """


def set_device_index(device: _device_t) -> None:
    r"""Set the current device index to a given device.

    Args:
        device (:class:`torch.device`, str, int): a given device that must match the current
            :ref:`accelerator<accelerators>` device type.

    .. note:: This function is a no-op if this device index is negative.
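
    A minimal usage sketch, assuming the current accelerator exposes a device with index 0::

        >>> torch.accelerator.set_device_index(0)
        >>> torch.accelerator.current_device_index()
        0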
    FoptionalN)r   r   r'   _accelerator_setDeviceIndexr-   r   s     r"   r   r      s2     %Ve<<<L	H((66666r#   zUse `set_device_index` instead.a  
    (Deprecated) Set the current device index to a given device.

    Args:
        device (:class:`torch.device`, str, int): a given device that must match the current
            :ref:`accelerator<accelerators>` device type.

    .. warning::

        :func:`torch.accelerator.set_device_idx` is deprecated in favor of :func:`torch.accelerator.set_device_index`
        and will be removed in a future PyTorch release.
    """


def current_stream(device: _device_t = None) -> torch.Stream:
    r"""Return the currently selected stream for a given device.

    Args:
        device (:class:`torch.device`, str, int, optional): a given device that must match the current
            :ref:`accelerator<accelerators>` device type. If not given,
            use :func:`torch.accelerator.current_device_index` by default.

    Returns:
        torch.Stream: the currently selected stream for a given device.
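
    A minimal usage sketch, assuming an accelerator is available; with no argument the
    stream of the current device index is returned::

        >>> s = torch.accelerator.current_stream()  # stream for the current device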
    """
    device_index = _get_device_index(device, optional=True)
    return torch._C._accelerator_getStream(device_index)


def set_stream(stream: torch.Stream) -> None:
    r"""Set the current stream to a given stream.

    Args:
        stream (torch.Stream): a given stream that must match the current :ref:`accelerator<accelerators>` device type.

    .. note:: This function will set the current device index to the device index of the given stream.
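
    A minimal usage sketch, assuming an accelerator is available; re-installing the
    currently selected stream is effectively a no-op::

        >>> torch.accelerator.set_stream(torch.accelerator.current_stream())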
    """
    torch._C._accelerator_setStream(stream)


def synchronize(device: _device_t = None) -> None:
    r"""Wait for all kernels in all streams on the given device to complete.

    Args:
        device (:class:`torch.device`, str, int, optional): device for which to synchronize. It must match
            the current :ref:`accelerator<accelerators>` device type. If not given,
            use :func:`torch.accelerator.current_device_index` by default.

    .. note:: This function is a no-op if the current :ref:`accelerator<accelerators>` is not initialized.

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> assert torch.accelerator.is_available(), "No available accelerators detected."
        >>> start_event = torch.Event(enable_timing=True)
        >>> end_event = torch.Event(enable_timing=True)
        >>> start_event.record()
        >>> tensor = torch.randn(100, device=torch.accelerator.current_accelerator())
        >>> sum = torch.sum(tensor)
        >>> end_event.record()
        >>> torch.accelerator.synchronize()
        >>> elapsed_time_ms = start_event.elapsed_time(end_event)
    """
    device_index = _get_device_index(device, optional=True)
    torch._C._accelerator_synchronizeDevice(device_index)


class device_index:
    r"""Context manager to set the current device index for the current :ref:`accelerator<accelerators>`.
    Temporarily changes the current device index to the specified value for the duration
    of the context, and automatically restores the previous device index when exiting
    the context.

    Args:
        device (Optional[int]): a given device index to temporarily set. If None,
            no device index switching occurs.

    Examples:

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> # Set device 0 as the current device temporarily
        >>> with torch.accelerator.device_index(0):
        ...     # Code here runs with device 0 as the current device
        ...     pass
        >>> # Original device is now restored
        >>> # No-op when None is passed
        >>> with torch.accelerator.device_index(None):
        ...     # No device switching occurs
        ...     pass
    """

    def __init__(self, device: Optional[int]) -> None:
        self.idx = device
        self.prev_idx = -1

    def __enter__(self) -> None:
        # Only switch devices when an index was actually given.
        if self.idx is not None:
            self.prev_idx = torch._C._accelerator_exchangeDevice(self.idx)

    def __exit__(self, *exc_info: object) -> None:
        # Restore the previously selected device index on exit.
        if self.idx is not None:
            torch._C._accelerator_maybeExchangeDevice(self.prev_idx)