
from typing import Any, Optional

import torch
import enum

from torch._C import _to_dlpack as to_dlpack
from torch.types import Device

__all__ = [
    "DLDeviceType",
    "from_dlpack",
]


class DLDeviceType(enum.IntEnum):
    # Device types as defined by the DLPack specification (dlpack.h).
    kDLCPU = 1
    kDLCUDA = 2
    kDLCUDAHost = 3
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLROCMHost = 11
    kDLExtDev = 12
    kDLCUDAManaged = 13
    kDLOneAPI = 14
    kDLWebGPU = 15
    kDLHexagon = 16
    kDLMAIA = 17


torch._C._add_docstr(
    to_dlpack,
    r"""to_dlpack(tensor) -> PyCapsule

Returns an opaque object (a "DLPack capsule") representing the tensor.

.. note::
  ``to_dlpack`` is a legacy DLPack interface. The capsule it returns
  cannot be used for anything in Python other than as input to
  ``from_dlpack``. The more idiomatic use of DLPack is to call
  ``from_dlpack`` directly on the tensor object - this works when that
  object has a ``__dlpack__`` method, which PyTorch and most other
  libraries now provide.

.. warning::
  Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
  Behavior when a capsule is consumed multiple times is undefined.

Args:
    tensor: a tensor to be exported
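
A minimal sketch of the legacy round trip (the ``from_dlpack`` examples
show the same flow in more detail)::

    >>> import torch
    >>> from torch.utils.dlpack import to_dlpack, from_dlpack
    >>> t = torch.arange(4)
    # Produce a capsule and consume it exactly once.
    >>> capsule = to_dlpack(t)
    >>> t2 = from_dlpack(capsule)
    >>> t2[0] = -1  # memory is shared with ``t``
    >>> t
    tensor([-1,  1,  2,  3])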

The DLPack capsule shares the tensor's memory.
""",
)


def from_dlpack(
    ext_tensor: Any,
    *,
    device: Optional[Device] = None,
    copy: Optional[bool] = None,
) -> "torch.Tensor":
    """from_dlpack(ext_tensor) -> Tensor

    Converts a tensor from an external library into a ``torch.Tensor``.

    The returned PyTorch tensor will share the memory with the input tensor
    (which may have come from another library). Note that in-place operations
    will therefore also affect the data of the input tensor. This may lead to
    unexpected issues (e.g., other libraries may have read-only flags or
    immutable data structures), so the user should only do this if they know
    for sure that this is fine.

    Args:
        ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
            The tensor or DLPack capsule to convert.

            If ``ext_tensor`` is a tensor (or ndarray) object, it must support
            the ``__dlpack__`` protocol (i.e., have an ``ext_tensor.__dlpack__``
            method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
            an opaque ``PyCapsule`` instance, typically produced by a
            ``to_dlpack`` function or method.

        device (torch.device or str or None): An optional PyTorch device
            specifying where to place the new tensor. If None (default), the
            new tensor will be on the same device as ``ext_tensor``.

        copy (bool or None): An optional boolean indicating whether to copy the
            input tensor. If None, PyTorch will copy only if necessary.

    Examples::

        >>> import torch.utils.dlpack
        >>> t = torch.arange(4)

        # Convert a tensor directly (supported in PyTorch >= 1.10)
        >>> t2 = torch.from_dlpack(t)
        >>> t2[:2] = -1  # show that memory is shared
        >>> t2
        tensor([-1, -1,  2,  3])
        >>> t
        tensor([-1, -1,  2,  3])

        # The old-style DLPack usage, with an intermediate capsule object
        >>> capsule = torch.utils.dlpack.to_dlpack(t)
        >>> capsule
        <capsule object "dltensor" at ...>
        >>> t3 = torch.from_dlpack(capsule)
        >>> t3
        tensor([-1, -1,  2,  3])
        >>> t3[0] = -9  # now we're sharing memory between 3 tensors
        >>> t3
        tensor([-9, -1,  2,  3])
        >>> t2
        tensor([-9, -1,  2,  3])
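
        # By contrast, requesting a copy yields a tensor with its own memory.
        # This is a sketch that assumes the producer implements the newer
        # DLPack ``copy`` keyword (recent PyTorch versions do).
        >>> t4 = torch.from_dlpack(t2, copy=True)
        >>> t4[0] = 0  # does not affect t, t2 or t3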
        >>> t
        tensor([-9, -1,  2,  3])

    """
    if hasattr(ext_tensor, "__dlpack__"):
        # Protocol path: ask the producer for a (possibly versioned) capsule.
        kwargs: dict[str, Any] = {}
        kwargs["max_version"] = (1, 0)
        if copy is not None:
            kwargs["copy"] = copy

        if device is not None:
            if isinstance(device, str):
                device = torch.device(device)
            assert isinstance(
                device, torch.device
            ), f"from_dlpack: unsupported device type: {type(device)}"
            kwargs["dl_device"] = torch._C._torchDeviceToDLDevice(device)

        ext_device = ext_tensor.__dlpack_device__()
        if ext_device[0] in (DLDeviceType.kDLCUDA, DLDeviceType.kDLROCM):
            # The producer lives on a CUDA or ROCm device, so pass the current
            # stream for synchronization. The array API requires the legacy
            # default stream to be passed as 1 on CUDA.
            stream = torch.cuda.current_stream(f"cuda:{ext_device[1]}")
            is_cuda = ext_device[0] == DLDeviceType.kDLCUDA
            stream_ptr = (
                1 if is_cuda and stream.cuda_stream == 0 else stream.cuda_stream
            )
            kwargs["stream"] = stream_ptr

        try:
            dlpack = ext_tensor.__dlpack__(**kwargs)
        except TypeError:
            # Older producers do not understand ``max_version``; retry without it.
            kwargs.pop("max_version")
            dlpack = ext_tensor.__dlpack__(**kwargs)
    else:
        # Old-style path: ``ext_tensor`` is already an opaque DLPack capsule.
        assert device is None and copy is None, (
            "device and copy kwargs not supported when ext_tensor is "
            "already a DLPack capsule."
        )
        dlpack = ext_tensor
    return torch._C._from_dlpack(dlpack)