
from __future__ import annotations

from typing import Any, TYPE_CHECKING

import torch


if TYPE_CHECKING:
    from types import TracebackType


def is_available() -> bool:
    return hasattr(torch._C, "_dist_autograd_init")


if is_available() and not torch._C._dist_autograd_init():
    raise RuntimeError("Failed to initialize torch.distributed.autograd")

if is_available():
    from torch._C._distributed_autograd import (
        _current_context,
        _get_debug_info,
        _get_max_id,
        _init,
        _is_valid_context,
        _new_context,
        _release_context,
        _retrieve_context,
        backward,
        DistAutogradContext,
        get_gradients,
    )


__all__ = ["context"]


class context:
    """
    Context object to wrap forward and backward passes when using
    distributed autograd. The ``context_id`` generated in the ``with``
    statement is required to uniquely identify a distributed backward pass
    on all workers. Each worker stores metadata associated with this
    ``context_id``, which is required to correctly execute a distributed
    autograd pass.

    Example::
        >>> # xdoctest: +SKIP
        >>> import torch.distributed.autograd as dist_autograd
        >>> import torch.distributed.rpc as rpc
        >>> with dist_autograd.context() as context_id:
        >>>     t1 = torch.rand((3, 3), requires_grad=True)
        >>>     t2 = torch.rand((3, 3), requires_grad=True)
        >>>     loss = rpc.rpc_sync("worker1", torch.add, args=(t1, t2)).sum()
        >>>     dist_autograd.backward(context_id, [loss])
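        >>>     # Once the backward pass completes, the gradients accumulated
        >>>     # in this context can be read back with get_gradients, which
        >>>     # this module also exports:
        >>>     grads = dist_autograd.get_gradients(context_id)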
    r   intc                Z    t                      | _        | j                                        S N)r   autograd_context_context_id)selfs    r   	__enter__zcontext.__enter__8   s#     ,$00222r   exc_typetype[BaseException] | None	exc_valueBaseException | None	tracebackTracebackType | NoneNonec                R    t          | j                                                   d S r!   )r   r"   r#   )r$   r&   r(   r*   s       r   __exit__zcontext.__exit__<   s'     	.::<<=====r   N)r   r   )r&   r'   r(   r)   r*   r+   r   r,   )__name__
__module____qualname____doc__r%   r.   r   r   r   r   r   %   sF         $3 3 3 3> > > > > >r   )r   r   )
__future__r   typingr   r   r   typesr   r   r   r
   RuntimeErrortorch._C._distributed_autogradr   r   r   r   r   r   r   r   r   r   r   __all__r   r   r   r   <module>r9      sn   " " " " " " % % % % % % % %   $######4 4 4 4 <>> J%(6688 J
,H
I
II<>>                           n
%> > > > > > > > > >r   