# mypy: allow-untyped-defs
import copyreg
import functools
import importlib.util
import logging
import sys
import traceback
import warnings
from collections import defaultdict
from types import ModuleType
from typing import Any, Callable, Generic, Optional, TYPE_CHECKING
from typing_extensions import deprecated, ParamSpec

import torch


def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)


def _to(self, device, non_blocking=False):
    """Returns a copy of this object in device memory.

    If this object is already on the correct device, then no copy is performed
    and the original object is returned.

    Args:
        device (int): The destination device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
    """
    if self.device == device:
        return self

    if device.type == "cpu":
        pin_memory = non_blocking and self.device.type in (
            "cuda",
            torch._C._get_privateuse1_backend_name(),
        )
        untyped_storage = torch.empty(
            self.nbytes(), dtype=torch.uint8, device=device, pin_memory=pin_memory
        ).untyped_storage()
        untyped_storage.copy_(self, non_blocking)
        return untyped_storage

    device_module = getattr(torch, device.type, None)
    assert (
        device_module is not None
    ), f"{device.type.upper()} device module is not loaded"
    with device_module.device(device):
        if self.is_sparse and hasattr(device_module, "sparse"):
            new_type = getattr(device_module.sparse, self.__class__.__name__)
            indices = getattr(torch.Tensor._indices(self), device.type)(
                device, non_blocking
            )
            values = getattr(torch.Tensor._values(self), device.type)(
                device, non_blocking
            )
            return new_type(indices, values, self.size())
        else:
            assert (
                not self.is_sparse
            ), f"sparse storage is not supported for {device.type.upper()} tensors"
            untyped_storage = torch.UntypedStorage(self.size(), device=device)
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage


def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    """Return the non-blocking flag given the function name and kwargs.

    Args:
        function_name (str): the name of the function being used.
        non_blocking (bool): the default value.
        **kwargs (dict): the kwargs passed to the function.
    """
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or "async" not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs["async"]
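
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of how the legacy ``async`` keyword is translated by
# _get_async_or_non_blocking above. The helper name `_demo_async_kwarg` is
# ours, used only for illustration; torch never calls it.
def _demo_async_kwarg():
    # the default flag passes through untouched when no kwargs are given
    assert _get_async_or_non_blocking("type", False, {}) is False
    # the deprecated spelling is accepted (with a warning) and wins
    assert _get_async_or_non_blocking("type", False, {"async": True}) is True
    # any other stray keyword raises TypeError
    raised = False
    try:
        _get_async_or_non_blocking("type", False, {"blocking": True})
    except TypeError:
        raised = True
    assert raised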
def _get_restore_location(device):
    """Return the map_location location.

    Used for rebuild functions where the tensor device is distinct from the storage
    """
    map_location = torch.serialization._serialization_tls.map_location
    if map_location is None:
        return device
    else:
        if isinstance(map_location, dict):
            return map_location.get(device, device)
        elif isinstance(map_location, (str, torch.device)):
            return map_location
        else:
            assert callable(map_location)
            raise RuntimeError(
                "Callable map_location not supported with "
                "_rebuild_wrapper_subclass or _rebuild_device_tensor_from_numpy"
            )


def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.empty((0,), dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)


def get_tensor_metadata(tensor):
    # Tensor's Metadata for serializing.
    # Currently, this only returns a dict[string, bool] specifying whether
    # `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]


def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]


def _restore_device_fake_mode(tensor):
    # When loading under an active fake mode, the storage carries the original
    # device in `_fake_device`; restore it (honoring map_location) on the tensor.
    if torch._guards.detect_fake_mode(None) is not None:
        if tensor.untyped_storage()._fake_device is not None:
            device = _get_restore_location(tensor.untyped_storage()._fake_device)
            if not isinstance(device, torch.device):
                device = torch.device(device)
            tensor.fake_device = device
    return tensor


def _rebuild_tensor_v2(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    metadata=None,
):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)

    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks

    tensor = _restore_device_fake_mode(tensor)
    return tensor


def _rebuild_tensor_v3(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    dtype,
    metadata=None,
):
    t = torch.empty(
        (0,),
        dtype=dtype,
        device=storage._untyped_storage.device,
        requires_grad=requires_grad,
    )
    t.set_(storage._untyped_storage, storage_offset, size, stride)
    if metadata:
        set_tensor_metadata(t, metadata)
    t._backward_hooks = backward_hooks
    t = _restore_device_fake_mode(t)
    return t


_sparse_tensors_to_validate: list["torch.Tensor"] = []


def _validate_loaded_sparse_tensors():
    if not torch.sparse.check_sparse_tensor_invariants.is_enabled():
        # Skip sparse tensor invariants validation for better performance. See
        # check_sparse_tensor_invariants documentation for how to control
        # sparse tensor invariants checking.
        _sparse_tensors_to_validate.clear()
        return
    try:
        for t in _sparse_tensors_to_validate:
            if t.layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    t._indices(), t._values(), t.size(), t.is_coalesced()
                )
            elif t.layout in {
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            }:
                if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
                    compressed_indices, plain_indices = (
                        t.crow_indices(),
                        t.col_indices(),
                    )
                else:
                    compressed_indices, plain_indices = (
                        t.ccol_indices(),
                        t.row_indices(),
                    )
                torch._validate_sparse_compressed_tensor_args(
                    compressed_indices, plain_indices, t.values(), t.size(), t.layout
                )
            else:
                raise NotImplementedError(
                    f"_validate_loaded_sparse_tensors for layout `{t.layout}`"
                )
    finally:
        _sparse_tensors_to_validate.clear()


def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        if len(data) == 3:
            # legacy pickle format
            indices, values, size = data
            is_coalesced = None
        else:
            indices, values, size, is_coalesced = data
        result = torch.sparse_coo_tensor(
            indices, values, size, check_invariants=False, is_coalesced=is_coalesced
        )
        _sparse_tensors_to_validate.append(result)
        return result

    elif layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        compressed_indices, plain_indices, values, size = data
        result = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")


def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)


def _rebuild_device_tensor_from_cpu_tensor(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = data.to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


# Should not be used, only here to be able to load Tensors serialized with older versions of pytorch
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(
        size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
    )


def _rebuild_wrapper_subclass(
    cls,
    dtype,
    size,
    stride,
    storage_offset,
    layout,
    device,
    requires_grad,
):
    device = _get_restore_location(device)
    return torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
        cls,
        size,
        strides=stride,
        dtype=dtype,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )
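
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of what _rebuild_tensor inverts: a tensor decomposed into
# (typed storage, storage_offset, size, stride), as torch.save does. The
# helper name `_demo_rebuild_tensor` is ours; note that constructing a
# TypedStorage directly may emit a deprecation warning in recent releases.
def _demo_rebuild_tensor():
    src = torch.arange(6.0).reshape(2, 3)
    typed = torch.TypedStorage(wrap_storage=src.untyped_storage(), dtype=src.dtype)
    rebuilt = _rebuild_tensor(typed, src.storage_offset(), src.size(), src.stride())
    assert torch.equal(src, rebuilt)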
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(
                    scales, dtype=torch.double, device=storage.device
                )
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(
                    scales, dtype=torch.float, device=storage.device
                )
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks
    return param


def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks

    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param


def _get_obj_state(obj):
    # Get the state of the python subclass.
    # This loosely mimicks the function on the object class but since Tensor
    # does not inherit from it, we cannot call that function directly.
    # Note that starting with Python 3.11, `__getstate__` is always defined
    # and thus the else branch will never be taken.
    getstate_fn = getattr(obj, "__getstate__", None)
    if getstate_fn:
        state = getstate_fn()
    else:
        slots_to_save = copyreg._slotnames(obj.__class__)  # type: ignore[attr-defined]
        if slots_to_save:
            state = (
                obj.__dict__,
                {
                    name: getattr(obj, name)
                    for name in slots_to_save
                    if hasattr(obj, name)
                },
            )
        else:
            state = obj.__dict__

    return state


def _set_obj_state(obj, state):
    if isinstance(state, tuple):
        if not len(state) == 2:
            raise RuntimeError(f"Invalid serialized state: {state}")
        dict_state = state[0]
        slots_state = state[1]
    else:
        dict_state = state
        slots_state = None

    # Starting with Python 3.11, the __dict__ attribute is lazily created
    # and is serialized as None when not needed.
    if dict_state:
        for k, v in dict_state.items():
            setattr(obj, k, v)

    if slots_state:
        for k, v in slots_state.items():
            setattr(obj, k, v)

    return obj


def _import_dotted_name(name):
    components = name.split(".")
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj


def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)


def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._indices(t) for t in tensors]
    )
    flat_values = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._values(t) for t in tensors]
    )
    return flat_indices, flat_values


def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)


def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
          tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)
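
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of the round trip performed by the flatten/unflatten pair
# above on same-dtype dense tensors. The helper name `_demo_flatten_roundtrip`
# is ours, for illustration only.
def _demo_flatten_roundtrip():
    tensors = [torch.randn(2, 3), torch.randn(4)]
    flat = _flatten_dense_tensors(tensors)            # one 1-D buffer, 10 elements
    assert flat.numel() == 10
    outs = _unflatten_dense_tensors(flat, tensors)    # views with the original shapes
    for orig, out in zip(tensors, outs):
        assert torch.equal(orig, out)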
def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
          the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
          reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)


def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields a chunk at each time,
    each containing tensors of same type up to certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: defaultdict = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf


# annotation decorator to get annotations in a way that is compatible
# with both jit and non-jit
def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__["return"] = ret
        return fun

    return dec


def render_call(fn, args, kwargs):
    str_fn = torch.overrides.resolve_name(fn)
    if str_fn is None:
        str_fn = str(fn)

    str_args: list = []
    with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
        str_args.extend(repr(a) for a in args)
        str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
        joined = ", ".join(str_args)
        r = f"{str_fn}({joined})"
    return r


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    def __repr__(self):
        return self


class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have message field
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(msg) from None
        raise exception


def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    if hasattr(torch, "xpu") and torch.xpu.is_available():  # type: ignore[attr-defined]
        return "xpu"
    if hasattr(torch, "mtia") and torch.mtia.is_available():
        return "mtia"
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    custom_device_mod = getattr(torch, custom_backend_name, None)
    if custom_device_mod and custom_device_mod.is_available():
        return custom_backend_name
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "mps":
        return get_member(torch.mps)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    if device_type and device_type.lower() == "mtia":
        return get_member(torch.mtia)
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device index
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]


def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1
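
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of how worker code uses ExceptionWrapper (defined above) to
# ship an exception across a thread or process boundary and re-raise it on the
# consumer side. The helper name `_demo_exception_wrapper` is ours.
def _demo_exception_wrapper():
    try:
        raise ValueError("boom")
    except Exception:
        # capture type + formatted traceback, without holding exc_info alive
        wrapped = ExceptionWrapper(where="in demo worker")
    try:
        wrapped.reraise()
    except ValueError as e:
        # the re-raised message embeds the original traceback text
        assert "boom" in str(e)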
def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has index. Note that for a device without a specified index, i.e.,
    ``torch.device('xxx')``, this will return the current default device of
    that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default device
    of the supported runtime platform if :attr:`optional` is ``True``. i.e.,
    the current default CUDA device will be returned if CUDA runtime is
    supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions
            # which are not supported in JIT and hence not scriptable. The JIT
            # equivalent API to get the current device index is
            # `get_current_device_index()` which can be scripted. We use
            # is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got:{device}"
            )
    return device_idx


def _handle_complex(tensor):
    """
    Returns a real view of a tensor if complex dtype else just the tensor
    need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule
    """
    return (
        torch.view_as_real(tensor)
        if not isinstance(tensor, torch.nn.UninitializedParameter)
        and tensor.is_complex()
        else tensor
    )


def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3


class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)


if TYPE_CHECKING:

    @deprecated(
        "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
        category=FutureWarning,
    )
    def is_compiling() -> bool:
        return torch.compiler.is_compiling()

else:

    def is_compiling() -> bool:
        """
        Indicates whether we are tracing/compiling with torch.compile() or torch.export().
        """
        warnings.warn(
            "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
            FutureWarning,
            stacklevel=2,
        )
        return torch.compiler.is_compiling()
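
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of the per-dtype byte sizes computed by _element_size
# above. The helper name `_demo_element_size` is ours; note that for complex
# dtypes torch.finfo reports the bit width of the underlying real dtype, so
# the shift by 2 doubles the real element size.
def _demo_element_size():
    assert _element_size(torch.float32) == 4
    assert _element_size(torch.complex64) == 8  # two float32 components
    assert _element_size(torch.bool) == 1
    assert _element_size(torch.int16) == 2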
def _functionalize_sync(t):
    # This code lives in python instead of C++ since conditioning on a certain
    # python subclass is much more of a pain in C++.
    from torch._subclasses.functional_tensor import FunctionalTensor

    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it
        # to intercept the ops that get called when we sync our inner tensor:
        # the input to those view ops is the inner C++ FunctionalTensorWrapper,
        # not the python FunctionalTensor, so the mode would complain.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._C._functionalize_sync(t.elem)  # type: ignore[attr-defined]
        finally:
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._C._functionalize_sync(t)  # type: ignore[attr-defined]


def _get_device_module(device_type: str):
    device_module = getattr(torch, device_type, None)
    if device_module is None:
        raise RuntimeError(
            f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
        )
    return device_module


def _dummy_type(name: str) -> type:
    def get_err_fn(is_init: bool):
        def err_fn(obj, *args, **kwargs):
            if is_init:
                class_name = obj.__class__.__name__
            else:
                class_name = obj.__name__
            raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")

        return err_fn

    return type(
        name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
    )


class _LazySeedTracker:
    # Since seeding is memory-less, only track the latest seed.
    # Note: `manual_seed_all` followed by `manual_seed` overwrites
    # the seed on current device. We track the order of **latest**
    # calls in this variable.
    def __init__(self):
        self.manual_seed_all_cb = None
        self.manual_seed_cb = None
        self.call_order = []

    def queue_seed_all(self, cb, traceback):
        self.manual_seed_all_cb = (cb, traceback)
        # update seed_all to be latest
        self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]

    def queue_seed(self, cb, traceback):
        self.manual_seed_cb = (cb, traceback)
        # update seed to be latest
        self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]

    def get_calls(self) -> list:
        return self.call_order


logger = logging.getLogger(__name__)
P = ParamSpec("P")


class CallbackRegistry(Generic[P]):
    def __init__(self, name: str):
        self.name = name
        self.callback_list: list[Callable[P, None]] = []

    def add_callback(self, cb: Callable[P, None]) -> None:
        self.callback_list.append(cb)

    def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None:
        for cb in self.callback_list:
            try:
                cb(*args, **kwargs)
            except Exception:
                logger.exception(
                    "Exception in callback for %s registered with gpu trace", self.name
                )


def try_import(module_name: str) -> Optional[ModuleType]:
    if (module := sys.modules.get(module_name, None)) is not None:
        return module
    elif (spec := importlib.util.find_spec(module_name)) is not None:
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        assert spec.loader is not None, "The loader attribute should always be set"
        spec.loader.exec_module(module)
        return module
    return None


# IMPORT_MAPPING and NAME_MAPPING are adapted from
# https://github.com/python/cpython/blob/main/Lib/_compat_pickle.py
# for use in the weights_only Unpickler.

IMPORT_MAPPING = {
    "__builtin__": "builtins",
    "copy_reg": "copyreg",
    "Queue": "queue",
    "repr": "reprlib",
    "_abcoll": "collections.abc",
    # Non-mutual mappings.
    "UserDict": "collections",
    "UserList": "collections",
    "UserString": "collections",
    "whichdb": "dbm",
    "StringIO": "io",
    "cStringIO": "io",
}


# This contains rename rules that are easy to handle.  We ignore the more
# complex stuff (e.g. mapping the names in the urllib and types modules).
NAME_MAPPING = {
    ("__builtin__", "xrange"): ("builtins", "range"),
    ("__builtin__", "reduce"): ("functools", "reduce"),
    ("__builtin__", "intern"): ("sys", "intern"),
    ("__builtin__", "unichr"): ("builtins", "chr"),
    ("__builtin__", "unicode"): ("builtins", "str"),
    ("__builtin__", "long"): ("builtins", "int"),
    ("itertools", "izip"): ("builtins", "zip"),
    ("itertools", "imap"): ("builtins", "map"),
    ("itertools", "ifilter"): ("builtins", "filter"),
    ("itertools", "ifilterfalse"): ("itertools", "filterfalse"),
    ("itertools", "izip_longest"): ("itertools", "zip_longest"),
    ("UserDict", "IterableUserDict"): ("collections", "UserDict"),
    ("UserList", "UserList"): ("collections", "UserList"),
    ("UserString", "UserString"): ("collections", "UserString"),
    # Non-mutual mappings.
    ("__builtin__", "basestring"): ("builtins", "str"),
    ("exceptions", "StandardError"): ("builtins", "Exception"),
    ("UserDict", "UserDict"): ("collections", "UserDict"),
}
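
# --- Illustrative example (not part of the original module) -----------------
# A minimal sketch of how the two mappings above translate Python 2 pickle
# references to their Python 3 homes when loading legacy checkpoints. The
# helper name `_demo_legacy_mappings` is ours, for illustration only.
def _demo_legacy_mappings():
    # module-level rename: "Queue" moved to "queue"
    assert IMPORT_MAPPING["Queue"] == "queue"
    # (module, name) rename: __builtin__.xrange became builtins.range
    assert NAME_MAPPING[("__builtin__", "xrange")] == ("builtins", "range")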