~L i*"ddlZddlZddlZddlZddlmZddlmZddlm Z ddl m Z m Z m Z mZmZmZddlmZmZddlZddlmZddlmZmZmZmZmZmZddlmZm Z m!Z!m"Z"m#Z#ed Z$ed ejJ Z&d e ee&e$fd fde ee&e$fd ffdZ'dZ(dZ)dZ*Gdd ej,jJZ+dZ,y)N) OrderedDict)deepcopy)Number)AnyCallablecastOptionalTypeVarUnion) Concatenate ParamSpec)check_serializing_named_tensor is_ellipsisresolve_ellipsissingle_ellipsis_indexunzip_namedshape update_names)get_default_nowrap_functionshandle_torch_functionhas_torch_functionhas_torch_function_unaryhas_torch_function_variadic_P _TensorLike)boundfTensorreturnc tjdtdtjdtj ddffd S)Nselfargskwargsrrc |g|}t|rt|g|i|S|g|i|S#t$r tcYSwxYwN)rr TypeErrorNotImplemented)r r!r"sargsrwrappeds S/mnt/ssd/data/python-lab/Trading/venv/lib/python3.12/site-packages/torch/_tensor.pyr(zN_handle_torch_function_and_wrap_type_error_to_not_implemented..wrapped&s\ "K4KE!%(,WeNeNvNNT+D+F+ + "! ! "s 0 0AA) functoolswrapsrrr!r")rr(s`@r)=_handle_torch_function_and_wrap_type_error_to_not_implementedr,#sK__Q"k""''"RYY"8"" NcV|tur||S||j|}||_|Sr$)r as_subclass__dict__)functyper!dictrets r)_rebuild_from_typer55s3 v~T{ + ! !$ 'CCL Jr-c||}t||ur|j|}t|jdtj tj ur|j ||St jj||}|S)N __setstate__) r2r/getattr __class__rr7torch_utils_set_obj_state)r1new_typer!stater4s r)_rebuild_from_type_v2r?>s +C Cy ooh'  ~v/B/BC"" #  Jll))#u5 Jr-ctjdtjdtjdtjdtj dtj dtjdtjdtjd tjd tjd tjd tjd tjdtjdi|S)Nze,d1ede$eee-fd2dfd<Z?e,ded=Z@ej&jZBej&jZDej&jZFd>ZGd?ZHd@ZIdAZJdBZKdfdCZLdDZMdEeNd2efdFZOePdGZQdHZRfdIZSfdJZTfdKZUdLZVdMZWdNZXd dOdPeeeYeZjffdQZ\fdRZ]e^dgdSZ_ejZad d d d dTdUeeNdVeebe$e$fdWeebecje$fdXeefdYZed2ebecje$ffdZZfd[ZxZgS)hr _is_paramct|rttj|f|Sgd}|D]}|jj |d y)aMClears any data cached in the tensor's ``__dict__`` that would prevent the tensor from being serialized. For example, subclasses with custom dispatched sizes / strides cache this info in non-serializable PyCapsules within the ``__dict__``, and this must be cleared out for serialization to function. Any subclass that overrides this MUST call ``super()._clear_non_serializable_cached_data().`` Additional data cleared within the override must be able to be re-cached transparently to avoid breaking subclass functionality. )_sym_sizes_capsule_sym_sizes_capsule_len_sym_strides_capsule_sym_strides_capsule_lenN)rrr#_clear_non_serializable_cached_datar0pop)r CACHED_SIZES_STRIDES_KEYSkeys r)rZz*Tensor._clear_non_serializable_cached_dataosU $D )(::TGT  % ! - )C MM  c4 ( )r-c  t|rttj|f||S|js t dt ||vr|t |Stj5|js|jjdvsxtjj|s5|jjtjjk(s$t|turE|jdk(r2|j!}t|t|ur|t d|j#j%|}|j&r|j)tj*k(r0|j)|j-|j/f}n|j)tj0tj2fvr?|j)|j5|j7|j9f}nt d|j)dtj:j=tj>jA|jB|jDd|jG|jI|jK||jL|jN}t|t|urt d |jQg}t|t|ur t d |jS||jG|jI|jK|jUr|jW}|jYr|j[}|jLr|j]|j^ |j^j||_/t|turut|t|ur t d tajb|jd}|D]0}tg||sti||tktm|||2|jotk|jp||_8||t |<|cdddS#1swYyxYw) NaOnly Tensors created explicitly by the user (graph leaves) support the deepcopy protocol at the moment. If you were attempting to deepcopy a module, this may be because of a torch.nn.utils.weight_norm usage, see https://github.com/pytorch/pytorch/pull/103001)lazyxlamtiampsmaiametaipuraiThe default implementation of __deepcopy__() for wrapper subclasses only works for subclass types that implement clone() and for which cloning returns another instance of the same subclass. 
You should either properly implement clone() for your subclass or override __deepcopy__() if it is intended behavior for clone() to return an instance of a different type.zUnsupported qscheme z in deepcopyT wrap_storagerQ _internalzThe default implementation of __deepcopy__() for quantized tensors expects the tensor returned by torch._utils._rebuild_qtensor() to match the type of the instance being copied. If you encounter this, please open an issue on PyTorch's GitHub.aThe default implementation of __deepcopy__() for non-wrapper subclasses only works for subclass types that implement new_empty() and for which that function returns another instance of the same subclass. You should either properly implement new_empty() for your subclass or override __deepcopy__() if it is intended behavior for new_empty() to return an instance of a different type.zType of deepcopy result does not match the type of the source tensor. If you encounter this, please open an issue on PyTorch's GitHub.)9rrr __deepcopy__is_leaf RuntimeErroridr:no_grad is_sparsedevicer2_C _has_storage_get_privateuse1_backend_namedata_ptrclone_typed_storage _deepcopy is_quantizedqschemeper_tensor_affineq_scale q_zero_pointper_channel_affine per_channel_affine_float_qparamsq_per_channel_scalesq_per_channel_zero_pointsq_per_channel_axisr;_rebuild_qtensorstorage TypedStorage_untyped_storagerQstorage_offsetsizestride requires_grad_backward_hooks new_emptyset_is_conj conj_physicalis_negnegrequires_grad_gradcopyreg _slotnamesr9hasattrsetattrrr8rZr0)r memo new_tensor new_storagequantizer_params slots_to_saveslots r)rizTensor.__deepcopy__s #D )()<) ! ]]_u ;;##HI--d3 ((EHH,R,R,TTJf,A1E!ZZ\  #4:5&*#113==dC $$ ||~)@)@@ LLN LLN --/,( 00>>, !LLN 557 ::< 335 ,(+24<<>2B,O "'!>!> 22)4)E)E"&**&*3 ++-  (**,, "JJ'tDz9*H"&!3JJ'tDz9*?OO#T%8%8%:DIIK||~%/%=%=%? {{}%/^^%5 !!))+yy$"&))"8"8"> Dz' #4:5&[ !( 2 24>> B )WDtT* D(74;NPT2UVW  4 4 6"*4==$"?J 'DDNku u u s/O*SASS ctjjj}tjj |}t tdr1t|tjjjur|st|tur|s|j|St|rttj|f||S|j|\}}|j!t"|t|||ffS)N _subclasses)r: serialization_serialization_tlsmaterialize_fake_tensorsr;_get_obj_staterr2r fake_tensor FakeTensorr_reduce_ex_internalrr __reduce_ex__rZr?)r protorr>r1r!s r)rzTensor.__reduce_ex__s    2 2 K K ! ++D1 E= )T e//;;FFF(4jF"5++E2 2 #D )()=)=weT T--e4 d 002%d4j$'FGGr-ct|rttj|f|Stjj d|j S)a| storage() -> torch.TypedStorage Returns the underlying :class:`TypedStorage`. .. warning:: :class:`TypedStorage` is deprecated. It will be removed in the future, and :class:`UntypedStorage` will be the only storage class. To access the :class:`UntypedStorage` directly, use :attr:`Tensor.untyped_storage()`. 
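# A minimal usage sketch for the recommendation in the ``storage()`` docstring
# above (prefer ``Tensor.untyped_storage()`` over the deprecated ``TypedStorage``
# accessor), assuming an ordinary dense CPU tensor:
#
#     >>> t = torch.ones(3)                     # float32, 3 elements
#     >>> s = t.untyped_storage()               # torch.UntypedStorage
#     >>> s.nbytes()                            # 3 elements * 4 bytes
#     12
#     >>> t.storage_offset()
#     0
#
# ``untyped_storage()`` returns the raw byte storage shared by all views of the
# tensor, so no deprecation warning is emitted.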
) stacklevel)rrrrr:_warn_typed_storage_removalrur s r)rzTensor.storage&sE $D )($$G G 11Q1?""$$r-cf|j}tj||jdS)NTrf)untyped_storager:rrQ)r rs r)ruzTensor._typed_storage9s...0!!( d  r-ct|ddlm}||t}tj j j}tj j j}|jjdvsTtjj|s|jjtjjk(rd|r td|j}tj j"||j$t'|j|j(ffS|jjdk(rr|rt+j,d|j$t/|j1|j3|j(f}tj j4|fS|j6r|r td|j9tj:k(r0tj:|j=|j?f}n|j9tj@tjBfvr?tj@|jE|jG|jIf}ntd|j9tjJjM|jOjP|j$d |jSt/|j1|j3||j(|f} tj jT| fS|jVr|jXtjZk(rK|jX|j]|j_|j1|jaff} ntcd |jXd tj jd| fS|jXtjftjhtjjtjlhvr|jXtjftjjhvr!|jo|jq} } n |js|ju} } |jX| | |jw|j1ff} tj jd| fS|jxrg|r td |jw|j{|j}|jf}tj j|fSt|tjur(t|jtjjurt|tjjjsAt|tjjjs|jdk(rt||j$t/|j1|j3|jS|jX|j|j(f}tj j|fSt|tjurt|jtjjurt|tjjjr|r|st||j$t/|j1|j3|jS|jX|j|j(f}tj j|fStjJj}|j$|vr+tj j}|j}n^tj j}tjJjM|jOjP|j$d }ttdrAt|tjjjr|r|j|_P||jSt/|j1|j3|j(|f}t|tjJjr||j$fz}tj j|}|r||fz}||fS)Nr)warn_if_has_hooks)r`rcrazTCannot serialize tensors on backends with no storage under skip_data context managerrdzQSerializing tensors on the meta device under skip_data context manager is a no-opz`Cannot serialize qtensor under skip_data context manager, file an issue if you need this featurez3Serialization is not supported for tensors of type Trfz(sparse tensor __reduce_ex__ for layout ``zfCannot serialize nested tensor under skip_data context manager, file an issue if you need this featurer)Srtorch.utils.hooksrrr:rr skip_datarror2rprqrrrkcpur;&_rebuild_device_tensor_from_cpu_tensorrQstrrwarningswarntuplerr_rebuild_meta_tensor_no_storagerwrxryrzr{r|r}r~rrrrrurrrrnlayout sparse_coo_indices_values is_coalescedNotImplementedError_rebuild_sparse_tensor sparse_csr sparse_csc sparse_bsr sparse_bsc crow_indices col_indices ccol_indices row_indicesvalues is_nested_nested_tensor_size_nested_tensor_strides_nested_tensor_storage_offsets_rebuild_nested_tensorr__torch_dispatch__ isinstancerfunctional_tensorFunctionalTensorrrrs_rebuild_wrapper_subclass _new_dtypes_rebuild_tensor_v3r_rebuild_tensor_v2r _fake_deviceUntypedStorageget_tensor_metadata)r rrbackward_hooksrr cpu_tensorarg_metar args_qtensor args_sparsecompressed_indices plain_indicesargs_sparse_compressed args_nestedarg_wrapper_subclass v3_dtypes rebuild_funcrr!metadatas r)rzTensor._reduce_ex_internal?sY&t,7 $)4''::DD    2 2 K K ! ;;  6 6%%d+   EHH$J$J$LL"jJ CCTZZT[[)94;M;MN  ;;  v % g diik" "" H LL@@(K K   "v||~!8!88++LLN%%'$ ((66$,,--/224++- $ #I$,,.IYZ  **!%!4!4!6!G!G**"+ ##%diik"  "" LLL11<@ @ ^^{{e...KK]]_dllndiik4CTCTCVW *>t{{m1MLL77E E [[               {{u//1A1ABB%%'$$&%2" %%'$$&%2"  &!KKMIIK & "LL779OP P ^^"| ((*++-335KLL77E E Jell *T --U\\5T5TT4!2!2!D!D!U!UV"4):):)F)F)Q)QR 1, T  diik" ##%  "" $ LL::!>!I!IJ"'?T  diik" ##%  "" $ LL::> ..0 %||>> --44!%!4!4!6!G!G**"5}-tU%6%6%B%B%M%MN'+{{$##%diik" "" D'5==#?#?@tzzm+||77=Hxk) $' 'r-c*t|rttj|f||S|js t dt |dk(r|j|yt |dk(r|d|_|d|d|df}|\|_ }|_ y)Nz/__setstate__ can be only called on leaf Tensorsrr) rrrr7rjrklenrdatarr)r r>_s r)r7zTensor.__setstate__s #D )()<)` for details on the memory layout of accumulated gradients. .. note:: If you run any forward ops, create ``gradient``, and/or call ``backward`` in a user-specified CUDA stream context, see :ref:`Stream semantics of backward passes`. .. note:: When ``inputs`` are provided and a given input is not a leaf, the current implementation will call its grad_fn (though it is not strictly needed to get this gradients). It is an implementation detail on which the user should not rely. See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details. 
Args: gradient (Tensor, optional): The gradient of the function being differentiated w.r.t. ``self``. This argument can be omitted if ``self`` is a scalar. Defaults to ``None``. retain_graph (bool, optional): If ``False``, the graph used to compute the grads will be freed; If ``True``, it will be retained. The default is ``None``, in which case the value is inferred from ``create_graph`` (i.e., the graph is retained only when higher-order derivative tracking is requested). Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. create_graph (bool, optional): If ``True``, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to ``False``. inputs (Sequence[Tensor], optional): Inputs w.r.t. which the gradient will be accumulated into ``.grad``. All other tensors will be ignored. If not provided, the gradient is accumulated into all the leaf Tensors that were used to compute the :attr:`tensors`. Defaults to ``None``. )gradient retain_graph create_graphinputs)rN)rrrbackwardr:autograd)r rrrrs r)rzTensor.backward:sYZ $D )(!))   (L,v  r-cjt|rttj|f||S|js t d|j 6t|_|j|jj|ddl m }||j }||j |j<|S)a3Registers a backward hook. The hook will be called every time a gradient with respect to the Tensor is computed. The hook should have the following signature:: hook(grad) -> Tensor or None The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of :attr:`grad`. This function returns a handle with a method ``handle.remove()`` that removes the hook from the module. .. note:: See :ref:`backward-hooks-execution` for more information on how when this hook is executed, and how its execution is ordered relative to other hooks. Example:: >>> v = torch.tensor([0., 0., 0.], requires_grad=True) >>> h = v.register_hook(lambda grad: grad * 2) # double the gradient >>> v.backward(torch.tensor([1., 2., 3.])) >>> v.grad 2 4 6 [torch.FloatTensor of size (3,)] >>> h.remove() # removes the hook @cannot register a hook on a tensor that doesn't require gradientrRemovableHandle) rrr register_hookrrkrrgrad_fn_register_hook_dictrrrlr hookrhandles r)rzTensor.register_hookusB $D )()=)=wdS S!!R     '#.=D ||' 0065 !5!56*.VYY' r-cJt|rttj|f||S|js t d|j t d|jt|_ddl m }||j}||j|j<|S)aRegisters a backward hook that runs after grad accumulation. The hook will be called after all gradients for a tensor have been accumulated, meaning that the .grad field has been updated on that tensor. The post accumulate grad hook is ONLY applicable for leaf tensors (tensors without a .grad_fn field). Registering this hook on a non-leaf tensor will error! The hook should have the following signature:: hook(param: Tensor) -> None Note that, unlike other autograd hooks, this hook operates on the tensor that requires grad and not the grad itself. The hook can in-place modify and access its Tensor argument, including its .grad field. This function returns a handle with a method ``handle.remove()`` that removes the hook from the module. .. note:: See :ref:`backward-hooks-execution` for more information on how when this hook is executed, and how its execution is ordered relative to other hooks. Since this hook runs during the backward pass, it will run in no_grad mode (unless create_graph is True). You can use torch.enable_grad() to re-enable autograd within the hook if you need it. 
Example:: >>> v = torch.tensor([0., 0., 0.], requires_grad=True) >>> lr = 0.01 >>> # simulate a simple SGD update >>> h = v.register_post_accumulate_grad_hook(lambda p: p.add_(p.grad, alpha=-lr)) >>> v.backward(torch.tensor([1., 2., 3.])) >>> v tensor([-0.0100, -0.0200, -0.0300], requires_grad=True) >>> h.remove() # removes the hook rzCpost accumulate grad hooks cannot be registered on non-leaf tensorsrr) rrr"register_post_accumulate_grad_hookrrkr_post_accumulate_grad_hooksrrrrlrs r)rz)Tensor.register_post_accumulate_grad_hooksL $D )(99D7D$ !!R  << #U   + + 3?J}D ,5 !A!AB6:((3 r-c*d}t|d)Ncdj|jdDcgc]}|jc}Scc}w)N )joinsplitstrip)rlines r)trimzTensor.reinforce..trims-99syyGtdjjlGH HGs=areinforce() was removed. Use torch.distributions instead. See https://pytorch.org/docs/main/distributions.html Instead of: probs = policy_network(state) action = probs.multinomial() next_state, reward = env.step(action) action.reinforce(reward) action.backward() Use: probs = policy_network(state) # NOTE: categorical is equivalent to what used to be called multinomial m = torch.distributions.Categorical(probs) action = m.sample() next_state, reward = env.step(action) loss = -m.log_prob(action) * reward loss.backward() )rk)r rewardrs r) reinforcezTensor.reinforces$ I     r-a Returns a new Tensor, detached from the current graph. The result will never require gradient. This method also affects forward mode AD gradients and the result will never have forward mode AD gradients. .. note:: Returned Tensor shares the same storage with the original one. In-place modifications on either of them will be seen, and may trigger errors in correctness checks. z Detaches the Tensor from the graph that created it, making it a leaf. Views cannot be detached in-place. This method also affects forward mode AD gradients and the result will never have forward mode AD gradients. ct|rttj|f|S|j j S)zaChecks if tensor is in shared memory. This is always ``True`` for CUDA tensors. )rrr is_sharedru _is_sharedrs r)rzTensor.is_shareds; $D )()9)9D7DI I""$//11r-ct|rttj|f|S|j j |S)aMoves the underlying storage to shared memory. This is a no-op if the underlying storage is already in shared memory and for CUDA tensors. Tensors in shared memory cannot be resized. See :meth:`torch.UntypedStorage.share_memory_` for more details. )rrr share_memory_ru_share_memory_rs r)rzTensor.share_memory_'s= $D )()=)=wM M ,,. r-ct||r ttj||f|||S|r|j S|j |j S)aDefines how to transform ``other`` when loading it into ``self`` in :meth:`~nn.Module.load_state_dict`. Used when :func:`~torch.__future__.get_swap_module_params_on_conversion` is ``True``. It is expected that ``self`` is a parameter or buffer in an ``nn.Module`` and ``other`` is the value in the state dictionary with the corresponding key, this method defines how ``other`` is remapped before being swapped with ``self`` via :func:`~torch.utils.swap_tensors` in :meth:`~nn.Module.load_state_dict`. .. note:: This method should always return a new object that is not ``self`` or ``other``. For example, the default implementation returns ``self.copy_(other).detach()`` if ``assign`` is ``False`` or ``other.detach()`` if ``assign`` is ``True``. 
Args: other (Tensor): value in state dict with key corresponding to ``self`` assign (bool): the assign argument passed to :meth:`nn.Module.load_state_dict` )assign)rrr module_loaddetachcopy_)r otherrs r)rzTensor.module_load4sY( 'tU 3(""T5M4v  <<> !::e$++- -r-ct|rttj|f|S|j dk(r|S|j dS)z&Reverses the tensor along dimension 0.r)rrr __reversed__dimfliprs r)rzTensor.__reversed__RsA #D )()<)r?r@rArBrCrDrE)rDrE)rrrstftr:) r r<r=r>r?r@rArBrCrDrEs r)rGz Tensor.stftsz& $D )( %%!%!- / zz         )+  r-lengthc t|r&ttj|f||||||||||  St j||||||||||  S)zSee :func:`torch.istft`)r=r>r?r@rBrCrHrD)rD)rrristftr:) r r<r=r>r?r@rBrCrHrDs r)rJz Tensor.istftst $D )( %%%!-  {{         )  r-ct|rttj|f|g|St j dddlm}|j||S)Nz non-inplace resize is deprecatedrResize) rrrresizerrtorch.autograd._functionsrMapply)r sizesrMs r)rNz Tensor.resizesE #D )(NN N 894||D%((r-ct||rttj||f||St j dddlm}|j||jS)Nz#non-inplace resize_as is deprecatedrrL) rrr resize_asrrrOrMrPr)r tensorrMs r)rSzTensor.resize_assO &tV 4()9)9D&>4QWX X ;<4||D&++-00r-cpt|rttj|f|||St |tr t |}t |t tjfr!tjj|||Stjj|||S#t $rYmwxYw)zSee :func:`torch.split`)r ) rrrr rint ValueErrorr:SymInt_VFsplit_with_sizes)r split_sizer s r)r z Tensor.splits #D )( tgtZS  j& )  _  j3 "5 699??4S9 999--dJD D   s B)) B54B5c t|r!ttj|f|||||St j|||||S)z[Returns the unique elements of the input tensor. See :func:`torch.unique` )sortedreturn_inverse return_countsr )rrruniquer:)r r]r^r_r s r)r`z Tensor.uniquesV $D )( -+ || )'   r-ct|r ttj|f||||St j||||S)zEliminates all but the first element from every consecutive group of equivalent elements. See :func:`torch.unique_consecutive` )r^r_r )rrrunique_consecutiver:)r r^r_r s r)rbzTensor.unique_consecutivesR $D )())-+  '' }RU  r-rrcBtjj||Sr$)rp_VariableFunctionsrsubr rs r)__rsub__zTensor.__rsub__1s$$))$66r-c(|j|zSr$) reciprocalrfs r)__rdiv__zTensor.__rdiv__5s 5((r-ztorch._C.TensorBasec.tj||Sr$)r: remainderrfs r)__rmod__zTensor.__rmod__Jsud++r-c8t|rttj|f||S|j dk(rJ|j s>t |tur-|jjj|Stj||S)Nr) rrr __format__r is_metar2ritemobject)r format_specs r)rozTensor.__format__Nsv #D )():):TGT;W W 88:?4<z2Tensor.__cuda_array_interface__.. s@QAL@srFr)typestrrstridesrversion)rrrr__get__rAttributeErrorr2rnrrkrRrQ element_sizerr is_contiguousrnumelrsr3)r rrrrsrrs @r)rzTensor.__cuda_array_interface__s< $D )(//77 || Ntyy{m\^^  >> Ediik]SLL    l  $DJJ/$$&djj!    G@$++-@@G&*jjlQ&64==?A% G5'VWXXr-ct|rttj|f|Stj j |jjS)zUstorage_type() -> type Returns the type of the underlying storage. ) rrr storage_typer:rrru_get_legacy_storage_classrs r)rzTensor.storage_typesK $D )()<)>@@r-ct|rttj|f|g|St ||j d}t | |S)aRefines the dimension names of :attr:`self` according to :attr:`names`. Refining is a special case of renaming that "lifts" unnamed dimensions. A ``None`` dim can be refined to have any name; a named dim can only be refined to have the same name. Because named tensors can coexist with unnamed tensors, refining names gives a nice way to write named-tensor-aware code that works with both named and unnamed tensors. :attr:`names` may contain up to one Ellipsis (``...``). The Ellipsis is expanded greedily; it is expanded in-place to fill :attr:`names` to the same length as ``self.dim()`` using names from the corresponding indices of ``self.names``. Python 2 does not support Ellipsis but one may use a string literal instead (``'...'``). Args: names (iterable of str): The desired names of the output tensor. May contain up to one Ellipsis. 
Examples:: >>> imgs = torch.randn(32, 3, 128, 128) >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W') >>> named_imgs.names ('N', 'C', 'H', 'W') >>> tensor = torch.randn(2, 3, 5, 7, 11) >>> tensor = tensor.refine_names('A', ..., 'B', 'C') >>> tensor.names ('A', None, None, 'B', 'C') .. warning:: The named tensor API is experimental and subject to change. refine_names)rrrrrnamessuper)r rr9s r)rzTensor.refine_namessLN $D )()<)>> tensor = torch.randn(2, 2, 2, 2, 2, 2) >>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F') # Move the F and E dims to the front while keeping the rest in order >>> named_tensor.align_to('F', 'E', ...) .. warning:: The named tensor API is experimental and subject to change. align_to)rrrrrrr)r r ellipsis_idxnamer9s r)rzTensor.align_toIsvH $D )(4'4P%P P,UJ?  7#E* *w# =d;t+ Tensor See :func:`torch.unflatten`. z"unflatten: sizes must be non-emptyNr) rrr unflattenrkrrrrrr)r r rQrr9s r)rzTensor.unflattenvs $D )()9)9D7D#uU UCD D e[ ) uudm ,E!Hudm1T+E2LE57$S%7 77$S%0 0r-cvt|r ttj|f|g|i|St |||dS)z+In-place version of :meth:`~Tensor.rename`.Tinplace)rrrrename_rr r rename_maps r)rzTensor.rename_sI $D )(059C D%TBBr-cvt|r ttj|f|g|i|St |||dS)a~Renames dimension names of :attr:`self`. There are two main usages: ``self.rename(**rename_map)`` returns a view on tensor that has dims renamed as specified in the mapping :attr:`rename_map`. ``self.rename(*names)`` returns a view on tensor, renaming all dimensions positionally using :attr:`names`. Use ``self.rename(None)`` to drop names on a tensor. One cannot specify both positional args :attr:`names` and keyword args :attr:`rename_map`. Examples:: >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W')) >>> renamed_imgs = imgs.rename(N='batch', C='channels') >>> renamed_imgs.names ('batch', 'channels', 'H', 'W') >>> renamed_imgs = imgs.rename(None) >>> renamed_imgs.names (None, None, None, None) >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width') >>> renamed_imgs.names ('batch', 'channel', 'height', 'width') .. warning:: The named tensor API is experimental and subject to change. Fr)rrrrenamerrs r)rz Tensor.renamesJD $D )( w/48B  D%UCCr-c"|jS)zConvert a tensor to :ref:`coordinate format `. Examples:: >>> dense = torch.randn(5, 5) >>> sparse = dense.to_sparse_coo() >>> sparse._nnz() 25 ) to_sparsers r) to_sparse_coozTensor.to_sparse_coos~~r-)ambiguity_checkrc(t|rttj|f|S|jrt d|j dt|tsGt|ts td|D]'}t|tjrtdd}d}t|tr|ng}t|tr|nd}|r||r|||r tddd lm}t!|j#|S) a dim_order(ambiguity_check=False) -> tuple Returns the uniquely determined tuple of int describing the dim order or physical layout of :attr:`self`. The dim order represents how dimensions are laid out in memory of dense tensors, starting from the outermost to the innermost dimension. Note that the dim order may not always be uniquely determined. If `ambiguity_check` is True, this function raises a RuntimeError when the dim order cannot be uniquely determined; If `ambiguity_check` is a list of memory formats, this function raises a RuntimeError when tensor can not be interpreted into exactly one of the given memory formats, or it cannot be uniquely determined. If `ambiguity_check` is False, it will return one of legal dim order(s) without checking its uniqueness. Otherwise, it will raise TypeError. Args: ambiguity_check (bool or List[torch.memory_format]): The check method for ambiguity of dim order. 
Examples:: >>> torch.empty((2, 3, 5, 7)).dim_order() (0, 1, 2, 3) >>> torch.empty((2, 3, 5, 7)).transpose(1, 2).dim_order() (0, 2, 1, 3) >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).dim_order() (0, 2, 3, 1) >>> torch.empty((1, 2, 3, 4)).dim_order() (0, 1, 2, 3) >>> try: ... torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check=True) ... except RuntimeError as e: ... print(e) The tensor does not have unique dim order, or cannot map to exact one of the given memory formats. >>> torch.empty((1, 2, 3, 4)).dim_order( ... ambiguity_check=[torch.contiguous_format, torch.channels_last] ... ) # It can be mapped to contiguous format (0, 1, 2, 3) >>> try: ... torch.empty((1, 2, 3, 4)).dim_order(ambiguity_check="ILLEGAL") ... except TypeError as e: ... print(e) The ambiguity_check argument must be a bool or a list of memory formats. .. warning:: The dim_order tensor API is experimental and subject to change. z$Can't get dim order on sparse type: rzHThe ambiguity_check argument must be a bool or a list of memory formats.cNd}|D]}|j|s|dz }|dk7S)z Returns True if the tensor cannot be uniquely mapped to any of the given memory formats, False otherwise. r) memory_format)r)rTvalid_memory_formats n_legalityrs r)invalid_unique_memory_formatz6Tensor.dim_order..invalid_unique_memory_formats> J!5 $ ''m'D!OJ $? "r-c |j}|j}tdt||ddD}td|D}|xs|S)aG Returns True if there're multiple legal dim orders for given tensor, False otherwise. The tensor is considered to have multiple legal dim orders if either of the following conditions is met: * Singleton Dimensions: There's at least one singleteon dimension in the tensor. Since their size is 1, they don't affect the memory offset (stride * index is zero because index is always zero). Therefore, they can be placed anywhere in the dimension order without changing how data is accessed. * Same strides: Strides reflect how the tensor is stored in memory. If any two dimensions have the same stride, swapping these dimensions won't change how data is accessed, leading to multiple correct dimension orders. c3,K|] \}}||k(ywr$r)rearlierlaters r)rzCTensor.dim_order..has_multiple_dim_order..9s(%3We5 (srNc3&K|] }|dk( yw)rNr)rrs r)rzCTensor.dim_order..has_multiple_dim_order..>s$A4TQY$As)rrrzip)rTrQrhas_duplicate_strideshas_singleton_dimss r)has_multiple_dim_orderz0Tensor.dim_order..has_multiple_dim_order&s`KKMEmmoG%((7:7GABK7P(% ! "%$A5$A!A (>,> >r-TzbThe tensor does not have unique dim order, or cannot map to exact one of the given memory formats.rN)rrr dim_orderrnrr2rrOrr%r:rrktorch._prims_common _prims_commonr3compute_elementwise_output_logical_to_physical_perm)r rrrrrcheck_multiple_dim_orderutilss r)rzTensor.dim_orders d $D )()9)9D7DI I >> 6tyy{mDLL  /40ot4^"1  !-1D1DE#b  # ?: */4@Ob  */4@Od ! %)?)E*41EFt  ,UNNtTUUr-ct|rttj|f|||S|rt||St||Sr$)rrr _update_namesrrr)r rrr9s r)rzTensor._update_namesTsN #D )($$tgtUG  7?5) )7>%( (r-c|i}tfd|DstStj5||i|}|t vr |cdddSt |cdddS#1swYyxYw)a This __torch_function__ implementation wraps subclasses such that methods called on subclasses return a subclass instance instead of a ``torch.Tensor`` instance. One corollary to this is that you need coverage for torch.Tensor methods if implementing __torch_function__ for subclasses. We recommend always calling ``super().__torch_function__`` as the base case when doing the above. While not mandatory, we recommend making `__torch_function__` a classmethod. 
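# A small sketch of the subclass-preservation behaviour described in the
# ``__torch_function__`` docstring above.  ``MyTensor`` and ``LoggingTensor``
# are hypothetical subclasses used only for illustration:
#
#     >>> class MyTensor(torch.Tensor):
#     ...     pass
#     >>> x = torch.ones(2).as_subclass(MyTensor)
#     >>> type(x + 1) is MyTensor               # results keep the subclass type
#     True
#     >>> type(torch.sin(x)) is MyTensor
#     True
#
# When overriding, the docstring recommends keeping the default implementation
# as the base case:
#
#     >>> class LoggingTensor(torch.Tensor):
#     ...     @classmethod
#     ...     def __torch_function__(cls, func, types, args=(), kwargs=None):
#     ...         # custom bookkeeping would go here
#     ...         return super().__torch_function__(func, types, args, kwargs)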
Nc36K|]}t|ywr$) issubclass)rtclss r)rz,Tensor.__torch_function__..rs5!:c1%5)allr&rpDisableTorchFunctionSubclassr_convert)rr1typesr!r"r4s` r)__torch_function__zTensor.__torch_function__`sx >F5u55! !  , , . *''C355 * *  S)  * * *sA) A))A2stream max_version dl_devicerrrrrc|t|r)|f}||||d}ttj|fg|i|S|jr t d|j r t d|jtjk7r t d|jjdk(rw|jjtjjk7rBt d|jjdtjjd|t|tur t!d |jjdk(r |d k7rtj"j$d u}| }||r|d k(s|r$|d k(rtjj'}nR|r|dk(r t d|rdnd} |r|d k7s|r|dvsJd| d|dtjj)|}tjj+} || k7rctjj-} | j/| |j1| n"|jjdk(r |Jd|jjdk(rhd d l} d d lmcm} t;| j=d ks#d| j=d j?vr tAd| jC|S||d d krtEjF|||StEjH|||S)a\ Creates a DLpack `capsule https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange`_ of the current tensor to be exported to other libraries. This function will be called from the `from_dlpack` method of the library that will consume the capsule. `from_dlpack` passes the current stream to this method as part of the specification. Args: stream (integer or None): An optional Python integer representing a pointer to a CUDA stream. The current stream is synchronized with this stream before the capsule is created, and since the capsule shares its storage with the tensor this make it safe to access from both streams. If None or -1 is passed then no synchronization is performed. If 1 (on CUDA) or 0 (on ROCM) then the default stream is used for synchronization. max_version (tuple[int, int] or None): An optional Python tuple with 2 integers, representing the maximum version the caller supports. If None (default), PyTorch will fallback to DLPack 0.8. dl_device (tuple[DLDeviceType, int] or None): An optional tuple specifying in which device the exported DLPack capsule should be on. If None (default), the exported DLPack capsule will be on the same device as ``self``. copy (bool or None): An optional boolean indicating whether or not to copy ``self``. If None, PyTorch will copy only if necessary. rz?Can't export tensors that require gradient, use tensor.detach()z/Can't export tensors with the conjugate bit setz9Can't export tensors with layout other than torch.stridedcudazACan't export tensors on a different CUDA device index. Expected: z. Current device: rNz"stream must be ``int`` or ``none``rrrz+per-thread default stream is not supported.CUDAROCm)rrzunsupported stream on z: rzstream should be None on cpu.r`z9Can't export to dlpack an XLA tensor that is not on CUDA.)rr)%rrr __dlpack__r BufferErrorrrr:stridedror2indexrcurrent_devicerVr%rhipdefault_streamExternalStreamcurrent_streamEventrecord wait_event torch_xlatorch_xla.utils.dlpackrdlpackr real_deviceslowerrk to_dlpackrp _to_dlpack_to_dlpack_versioned)r rrrrr!r"is_rocmr device_strr eventr xla_dlpacks r)rzTensor.__dlpack__~sH $D )7D *& F )):):TGUdUfU U   Q  <<>OP P ;;%-- 'K  KK   & !!UZZ%>%>%@@![[../0##(::#<#<#>"?qB   $v,c"9@A A [[   'FbLmm''t3G!kG~'fkw6UV;224v{%&STT'.VF FaKf 4D+J B#B B> ;;  u $  7 7I**,-2!7!7!9!!R^^  F$bnn   G2 .< */  > E%*% & >" " 4 8 &%)$(%)! #')-*./ / SM/ SM / # /  / / / 4./ !/ "$/ h%)$(%) #' $$' ' SM' SM ' # '  ' ' 4.'  ' ' R)1E" . $C7eHc5$$GH7X7C7C)eHc5$$GH)X)C)L==))L "E(Cg*M$N O    F MM   GM HC,eHc5$$GH,X,C,4C&eHc5$$GH&X&C&C/%#ud(B"C//C/ C/53t)C#D//C/C58S%w>?5 5C5 C68S%w>?6 6C6 C)C)mm$$GmmGmmG $, :' C t &/Y/Yb A*+X+ Z1, C(DT  LQ@V"'d53F3F.G(G"H@VD )**699 !%158<# zM zMeCHo. 
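# A brief sketch of the DLPack interchange documented above.  Consumers do not
# normally call ``__dlpack__`` directly; they hand the tensor to a
# ``from_dlpack`` entry point, which negotiates stream/version details and
# shares the underlying memory:
#
#     >>> x = torch.arange(4.0)
#     >>> y = torch.from_dlpack(x)              # zero-copy: y aliases x
#     >>> y[0] = 7.0
#     >>> x[0]
#     tensor(7.)
#     >>> x.__dlpack_device__()                 # (device type, index); CPU is type 1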
zM E$,,"345 zM tn zMx#"5s):#;#"JJr-ctur|St|trt|s|j}t|ttfrt |fd|D}|S)Nc36K|]}t|ywr$)r)rrrs r)rz_convert..+s6QC(6r)rrr/rrr2)r4rs `r)rr"sY f} #vz#s';ooc"#t}%d3i6#66 Jr-)-rrGr*r collectionsrrrnumbersrtypingrrrr r r typing_extensionsr r r:torch._Crptorch._namedtensor_internalsrrrrrrtorch.overridesrrrrrrr4rr,r5r?rRrrrr-r)rTs #@@4 t_m2==9  KO,h67 k+r/*H45$" :sUXX sl5 r-
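# A short sketch of the ``Tensor.backward`` arguments documented earlier in this
# module (``gradient`` for non-scalar outputs, ``inputs`` to restrict which
# leaves receive gradients):
#
#     >>> x = torch.tensor([1.0, 2.0], requires_grad=True)
#     >>> w = torch.tensor([3.0, 4.0], requires_grad=True)
#     >>> (w * x).sum().backward(inputs=[w])    # accumulate only into w.grad
#     >>> w.grad
#     tensor([1., 2.])
#     >>> x.grad is None
#     True
#
#     >>> z = w * x                             # non-scalar output: pass `gradient`
#     >>> z.backward(gradient=torch.ones_like(z))
#     >>> x.grad
#     tensor([3., 4.])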