
"""
PyTorch utilities: Utilities related to PyTorch
"""

from typing import List, Optional, Tuple, Union

from . import logging
from .import_utils import is_torch_available, is_torch_version


if is_torch_available():
    import torch
    from torch.fft import fftn, fftshift, ifftn, ifftshift

logger = logging.get_logger(__name__)

try:
    from torch._dynamo import allow_in_graph as maybe_allow_in_graph
except (ImportError, ModuleNotFoundError):

    def maybe_allow_in_graph(cls):
        # torch._dynamo is unavailable (PyTorch < 2.0), so the decorator is a no-op.
        return cls
    shape	generatorztorch.Generatordeviceztorch.devicedtypeztorch.dtypelayoutztorch.layoutc           	      .    | d         }pt           j        |pt          j        d          }t          t                    sj        j        nd         j        j        }||j        k    r3|dk    r-d|dk    r$t                              d| d| d| d           n'||j        k    r|d	k    rt          d
| d| d          t          t                    rt                    dk    rd         t          t                    rVd dd         z     fdt          |          D             }t          j        |d                              |          }n,t          j                                       |          }|S )zA helper function to create random tensors on the desired `device` with the desired `dtype`. When
    passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor
    is always created on the CPU.
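
    Example:
        A minimal sketch (the shape and seeds are arbitrary, chosen only for illustration);
        passing one CPU generator per batch element makes each sample independently reproducible:

        >>> generators = [torch.Generator("cpu").manual_seed(i) for i in range(2)]
        >>> latents = randn_tensor((2, 4, 8, 8), generator=generators)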
    """
    # Default to the requested device; may fall back to CPU below.
    rand_device = device
    batch_size = shape[0]

    layout = layout or torch.strided
    device = device or torch.device("cpu")

    if generator is not None:
        gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type
        if gen_device_type != device.type and gen_device_type == "cpu":
            rand_device = "cpu"
            if device != "mps":
                logger.info(
                    f"The passed generator was created on 'cpu' even though a tensor on {device} was expected."
                    f" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably"
                    f" slightly speed up this function by passing a generator that was created on the {device} device."
                )
        elif gen_device_type != device.type and gen_device_type == "cuda":
            raise ValueError(f"Cannot generate a {device} tensor from a generator of type {gen_device_type}.")

    # make sure generator list of length 1 is treated like a non-list
    if isinstance(generator, list) and len(generator) == 1:
        generator = generator[0]

    if isinstance(generator, list):
        shape = (1,) + shape[1:]
        latents = [
            torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)
            for i in range(batch_size)
        ]
        latents = torch.cat(latents, dim=0).to(device)
    else:
        latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)

    return latents


def is_compiled_module(module) -> bool:
    """Check whether the module was compiled with torch.compile()"""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def fourier_filter(x_in: "torch.Tensor", threshold: int, scale: float) -> "torch.Tensor":
    """Fourier filter as introduced in FreeU (https://arxiv.org/abs/2309.11497).

    This version of the method comes from here:
    https://github.com/huggingface/diffusers/pull/5164#issuecomment-1732638706
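
    Example:
        An illustrative sketch (the tensor shape and scale are arbitrary): the centered
        low-frequency block of the shifted spectrum, 2 * `threshold` wide per axis, is
        multiplied by `scale`, so values below 1.0 damp the low frequencies:

        >>> x = torch.randn(1, 4, 32, 32)
        >>> damped = fourier_filter(x, threshold=1, scale=0.9)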
    """
    x = x_in
    B, C, H, W = x.shape

    # Non-power-of-2 spatial sizes must be upcast to float32 for torch.fft
    if (W & (W - 1)) != 0 or (H & (H - 1)) != 0:
        x = x.to(dtype=torch.float32)

    # FFT
    x_freq = fftn(x, dim=(-2, -1))
    x_freq = fftshift(x_freq, dim=(-2, -1))

    B, C, H, W = x_freq.shape
    mask = torch.ones((B, C, H, W), device=x.device)

    crow, ccol = H // 2, W // 2
    mask[..., crow - threshold : crow + threshold, ccol - threshold : ccol + threshold] = scale
    x_freq = x_freq * mask

    # IFFT
    x_freq = ifftshift(x_freq, dim=(-2, -1))
    x_filtered = ifftn(x_freq, dim=(-2, -1)).real

    return x_filtered.to(dtype=x_in.dtype)
a]  Applies the FreeU mechanism as introduced in https:
    //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU.

    Args:
        resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied.
        hidden_states (`torch.Tensor`): Inputs to the underlying block.
        res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block.
        s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features.
        s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features.
        b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
        b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
    """
    if resolution_idx == 0:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b1"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s1"])
    if resolution_idx == 1:
        num_half_channels = hidden_states.shape[1] // 2
        hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs["b2"]
        res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs["s2"])

    return hidden_states, res_hidden_states