
import contextlib
import functools
from typing import Callable

from typing_extensions import deprecated

import torch
from torch._library.utils import Kernel, RegistrationHandle


class FakeImplHolder:
    """A holder to which one can register a fake impl."""

    def __init__(self, qualname: str):
        self.qualname: str = qualname
        self.kernels: list[Kernel] = []

    @property
    def kernel(self):
        # The active kernel is the most recently registered one, if any.
        if len(self.kernels) == 0:
            return None
        return self.kernels[-1]

    @kernel.setter
    def kernel(self, value):
        raise RuntimeError("Unable to directly set kernel.")

    def register(
        self, func: Callable, source: str, lib, *, allow_override: bool = False
    ) -> RegistrationHandle:
        """Register a fake impl.

        Returns a RegistrationHandle that one can use to de-register this
        fake impl.
        """
        if not allow_override:
            if self.kernel is not None:
                raise RuntimeError(
                    f"register_fake(...): the operator {self.qualname} "
                    f"already has a fake impl registered at "
                    f"{self.kernel.source}."
                )
            if torch._C._dispatch_has_kernel_for_dispatch_key(self.qualname, "Meta"):
                raise RuntimeError(
                    f"register_fake(...): the operator {self.qualname} "
                    f"already has a DispatchKey::Meta implementation via a "
                    f"pre-existing torch.library or TORCH_LIBRARY registration. "
                    f"Please either remove that registration or don't call "
                    f"register_fake."
                )
            if torch._C._dispatch_has_kernel_for_dispatch_key(
                self.qualname, "CompositeImplicitAutograd"
            ):
                raise RuntimeError(
                    f"register_fake(...): the operator {self.qualname} "
                    f"already has an implementation for this device type via a "
                    f"pre-existing registration to "
                    f"DispatchKey::CompositeImplicitAutograd. "
                    f"CompositeImplicitAutograd operators do not need a fake "
                    f"impl; instead, the operator will decompose into its "
                    f"constituents and those can have fake impls defined on them."
                )

        # Record the kernel; the `kernel` property reports the most recently
        # registered one.
        kernel = Kernel(func, source)
        self.kernels.append(kernel)

        def deregister_fake_kernel():
            self.kernels.remove(kernel)

        # Also route the Meta dispatch key through this fake impl.
        meta_kernel = construct_meta_kernel(self.qualname, self)
        lib.impl(self.qualname, meta_kernel, "Meta", allow_override=allow_override)

        handle = RegistrationHandle(deregister_fake_kernel)
        return handle


def construct_meta_kernel(qualname: str, fake_impl_holder: FakeImplHolder) -> Callable:
    assert fake_impl_holder.kernel is not None

    @functools.wraps(fake_impl_holder.kernel.func)
    def meta_kernel(*args, **kwargs):
        assert fake_impl_holder.kernel is not None
        source = fake_impl_holder.kernel.source

        def error_on_ctx():
            raise RuntimeError(
                f"{qualname} ({source}): You're trying to run this operator "
                f"with meta Tensors (as opposed to FakeTensors), but this "
                f"operator may return an output Tensor with data-dependent "
                f"shape. Meta Tensors don't support operators with outputs "
                f"that have data-dependent shapes but FakeTensors do. If "
                f"your operator does not return an output with data-dependent "
                f"shape, make sure the FakeTensor and/or meta kernel does not "
                f"call torch.library.get_ctx(). Otherwise, please use "
                f"FakeTensors."
            )

        with set_ctx_getter(error_on_ctx):
            return fake_impl_holder.kernel(*args, **kwargs)

    return meta_kernel


def get_none():
    return None


global_ctx_getter: Callable = get_none


@contextlib.contextmanager
def set_ctx_getter(ctx_getter):
    global global_ctx_getter
    prev = global_ctx_getter
    try:
        global_ctx_getter = ctx_getter
        yield
    finally:
        global_ctx_getter = prev


class FakeImplCtx:
    """
    Context object for writing fake implementations for custom operators.
    """

    def __init__(self, _fake_mode, _op):
        self._fake_mode = _fake_mode
        self._shape_env = _fake_mode.shape_env
        self._op = _op

    @deprecated(
        "`create_unbacked_symint` is deprecated, please use `new_dynamic_size` instead",
        category=FutureWarning,
    )
    def create_unbacked_symint(self, *, min=2, max=None) -> torch.SymInt:
        return self.new_dynamic_size(min=min, max=max)

    def new_dynamic_size(self, *, min=0, max=None) -> torch.SymInt:
        """Constructs a new symint (symbolic int) representing a data-dependent value.

        This is useful for writing the fake implementation (which is necessary
        for torch.compile) for a CustomOp where an output Tensor has a size
        that depends on the data of the input Tensors.

        Args:
            min (int): A statically known inclusive lower bound for this symint. Default: 0
            max (Optional[int]): A statically known inclusive upper bound for this
                symint. Default: None

        .. warning::

            It is important that the ``min`` and ``max`` (if not None) values are set
            correctly; otherwise, there will be undefined behavior under
            torch.compile. Note that torch.compile specializes on 0/1 sizes,
            which is why the deprecated ``create_unbacked_symint`` defaults
            ``min`` to 2.

            You must also verify that your implementation on concrete Tensors
            (e.g. CPU/CUDA) only returns Tensors where the size that corresponds
            to the symint respects these constraints.
            The easiest way to do this is to add an assertion in the CPU/CUDA/etc
            implementation that the size follows these bounds.
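
            For instance (a sketch with made-up bounds): if the fake impl
            allocates ``nnz = ctx.new_dynamic_size(min=1, max=128)``, the
            CPU kernel can assert the same bounds on its concrete output,
            e.g. ``torch._check(1 <= res.shape[0] <= 128)``.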

        Example::

            >>> # An operator with data-dependent output shape
            >>> lib = torch.library.Library("mymodule", "FRAGMENT")
            >>> lib.define("mymodule::custom_nonzero(Tensor x) -> Tensor")
            >>>
            >>> @torch.library.register_fake("mymodule::custom_nonzero")
            >>> def _(x):
            >>>     # Number of nonzero elements is data-dependent.
            >>>     # Since we cannot peek at the data in a fake impl,
            >>>     # we use the ctx object to construct a new symint that
            >>>     # represents the data-dependent size.
            >>>     ctx = torch.library.get_ctx()
            >>>     nnz = ctx.new_dynamic_size()
            >>>     shape = [nnz, x.dim()]
            >>>     result = x.new_empty(shape, dtype=torch.int64)
            >>>     return result
            >>>
            >>> @torch.library.impl(lib, "custom_nonzero", "CPU")
            >>> def _(x):
            >>>     x_np = x.numpy()
            >>>     res = np.stack(np.nonzero(x_np), axis=1)
            >>>     return torch.tensor(res, device=x.device)
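            >>>
            >>> # Sketch: the fake impl only runs while tracing; under
            >>> # torch.compile, data-dependent output shapes generally must
            >>> # be enabled via the dynamo config flag below.
            >>> torch._dynamo.config.capture_dynamic_output_shape_ops = True
            >>> x = torch.tensor([0.0, 1.0, 2.0, 0.0])
            >>> print(torch.compile(torch.ops.mymodule.custom_nonzero)(x).shape)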

        Nzctx.new_dynamic_size(min=z, max=zZ): expected min and max to be statically known ints but got SymInt. This is not supported.r   zc, ...): expected min to be greater than or equal to 0: this API can only create non-negative sizes.)rL   allow_dynamic_output_shape_opsr%   _subclassesfake_tensorDynamicOutputShapeExceptionrM   
isinstanceSymInt
ValueErrorallocate_sizerV   s      r   rU   zFakeImplCtx.new_dynamic_size   s    f O#?A $ #/KKDHUUUc5<(( 	JsEL,I,I 	*C * *s * * *   77'C ' ' '   T_c3777r   )r/   r0   r1   r2   r   r   FutureWarningr%   r^   rW   rU   r6   r   r   rH   rH      s           
 ZW   -.4 7 7 7EL 7 7 7	 7 '(T F8 F8 F8el F8 F8 F8 F8 F8 F8r   rH   c                     |                                  }t          j        j        j                            |||           |S rT   )rW   r%   fxexperimentalsymbolic_shapes_constrain_range_for_size)rK   min_valmax_valresults       r   r`   r`      sG    --//F	H)CCG D    Mr   )r   N)
contextlibr?   typingr   typing_extensionsr   r%   torch._library.utilsr   r   r   r3   r)   rB   rC   __annotations__contextmanagerr<   rH   r`   r6   r   r   <module>rp      sO                  ( ( ( ( ( (  ; ; ; ; ; ; ; ;J J J J J J J JZC > h    4   ' 8 & & & ! ! !W8 W8 W8 W8 W8 W8 W8 W8t     r   
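

# ---------------------------------------------------------------------------
# Illustrative usage sketch: mirrors the `custom_nonzero` example from the
# `new_dynamic_size` docstring above. The `mymodule::custom_nonzero` op is
# hypothetical; run this file directly to try the eager CPU path. (Requires
# numpy; the fake impl is only consulted when tracing, e.g. torch.compile.)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    demo_lib = torch.library.Library("mymodule", "FRAGMENT")
    demo_lib.define("custom_nonzero(Tensor x) -> Tensor")

    @torch.library.register_fake("mymodule::custom_nonzero")
    def _(x):
        # The number of nonzero elements is data-dependent, so ask the ctx
        # for a fresh data-dependent symint to use as the output size.
        ctx = torch.library.get_ctx()
        nnz = ctx.new_dynamic_size()
        return x.new_empty([nnz, x.dim()], dtype=torch.int64)

    @torch.library.impl(demo_lib, "custom_nonzero", "CPU")
    def _(x):
        x_np = x.numpy()
        res = np.stack(np.nonzero(x_np), axis=1)
        return torch.tensor(res, device=x.device)

    x = torch.tensor([0.0, 1.5, 0.0, 2.0])
    # Eager CPU call uses the real kernel above.
    print(torch.ops.mymodule.custom_nonzero(x))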