from __future__ import annotations

import itertools
from abc import ABC
from dataclasses import dataclass
from typing import Any

import torchgen.api.dispatcher as dispatcher
from torchgen.api.lazy import (
    getValueT,
    isValueType,
    LazyArgument,
    LazyIrProperties,
    LazyIrSchema,
    tensorListValueT,
)
from torchgen.api.translate import translate
from torchgen.api.types import (
    BaseCType,
    Binding,
    deviceT,
    DispatcherSignature,
    kernel_signature,
    NativeSignature,
    OptionalCType,
    VectorCType,
)
from torchgen.context import method_with_native_function
from torchgen.dest.lazy_ts_lowering import ts_lowering_body
from torchgen.model import (
    Argument,
    BackendIndex,
    BackendMetadata,
    BaseTy,
    BaseType,
    FunctionSchema,
    ListType,
    NativeFunction,
    NativeFunctionsGroup,
)


def node_ctor_arg_rvalue_string(arg: LazyArgument) -> str:
    """
    Given a LazyArgument,
    generate a c++ string for materializing an rvalue of that arg for passing into
    a lazy Node constructor.
    """
    # TODO: Matching on CType seems wrong; should be matching on Type
    if isValueType(arg.lazy_type):
        if isinstance(arg.lazy_type, BaseCType):
            if arg.is_wrapped_scalar:
                return f"node_{arg.name}"
            elif arg.lazy_type.type is tensorListValueT:
                return f"lazy_{arg.name}_tensorlist"
            elif arg.is_symint_or_list:
                return f"GetSymIntValue({arg.name})"
            return f"lazy_{arg.name}->GetIrValue()"
        elif isinstance(arg.lazy_type, OptionalCType):
            if arg.is_symint_or_list:
                # TODO: I don't understand when you should put lazy_ in the name
                # just hoping it doesn't matter here
                return (
                    f"{arg.name} ? std::make_optional(GetSymIntValue(*{arg.name})) : "
                    "::std::nullopt"
                )
            return (
                f"lazy_{arg.name} ? "
                f"std::make_optional(lazy_{arg.name}->GetIrValue()) : "
                "::std::nullopt"
            )
        else:
            raise AssertionError(
                f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
            )
    else:
        # NB: this is here because right now we aren't treating SymInt[] as a
        # value type; when we do, this branch can be removed
        if isinstance(arg.orig_type, ListType) and arg.orig_type.elem == BaseType(
            BaseTy.SymInt
        ):
            if arg.symint:
                return f"GetSymIntArrayRefValue({arg.name})"
            else:
                return f"std::vector<int64_t>({arg.name}.begin(), {arg.name}.end())"
        elif isinstance(arg.lazy_type, VectorCType) and isinstance(
            arg.lazy_type.elem, BaseCType
        ):
            return f"std::vector<{arg.lazy_type.elem.type}>({arg.name}.begin(), {arg.name}.end())"
        elif (
            isinstance(arg.lazy_type, OptionalCType)
            and isinstance(arg.lazy_type.elem, VectorCType)
            and isinstance(arg.lazy_type.elem.elem, BaseCType)
        ):
            return f"torch::lazy::ToOptionalVector<{arg.lazy_type.elem.elem.type}>({arg.name})"
        else:
            return f"{arg.name}"


def node_ctor_inputs(schema: LazyIrSchema) -> str:
    """
    Produce a formatted string with the arguments as passed into the constructor of a node class.
    """
    node_ctor_values = [
        node_ctor_arg_rvalue_string(arg) for arg in schema.filtered_args()
    ]
    return ",\n                              ".join(node_ctor_values)


def gen_fallback_code(
    schema: LazyIrSchema,
    sig: DispatcherSignature | NativeSignature,
    overload_name: str,
) -> str:
    """
    Generate code that falls back to eager conditioned on a predicate
    """
    dispatcher_sig = DispatcherSignature.from_schema(schema.func)
    exprs = translate(sig.arguments(), dispatcher_sig.arguments())
    fallback_args = ",\n                ".join([a.expr for a in exprs])
    if len(overload_name):
        aten_op_str = f"ATEN_OP2({schema.aten_name}, {overload_name})"
    else:
        aten_op_str = f"ATEN_OP({schema.aten_name})"
    return f"""
        if (force_eager_fallback({aten_symbol(schema)})) {{
            return at::native::call_fallback_fn_symint<&ltc_eager_fallback, {aten_op_str}>::call(
                {fallback_args}
            );
        }}
"""


def aten_symbol(schema: LazyIrSchema) -> str:
    missing_interned_strings = {
        "sigmoid_backward",
    }
    if schema.aten_name in missing_interned_strings:
        return f'c10::Symbol::fromQualString("aten::{schema.aten_name}")'

    if not schema.aten_name.startswith("at::"):
        return f"at::aten::{schema.aten_name}"
    else:
        return schema.aten_name


# Converts all tensor-like arguments to meta tensors and returns:
# (1) a string with the conversion logic, and
# (2) a context (list of Bindings) to be used by translate().
def convert_to_meta_tensors(sig: DispatcherSignature) -> tuple[str, list[Binding]]:
    context: list[Binding] = []
    unwrapped_tensor_args: list[str] = []
    for arg in sig.arguments():
        if isinstance(arg.argument, Argument) and arg.argument.type.is_tensor_like():
            unwrapped_name = f"{arg.name}_meta"
            unwrapped_tensor_args.append(
                f"auto {unwrapped_name} = to_meta({arg.name});"
            )
            context.append(arg.with_name(unwrapped_name))
        else:
            context.append(arg)
    unwrap_tensor_args_str = "\n        ".join(unwrapped_tensor_args)
    return unwrap_tensor_args_str, context
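

# For orientation, gen_fallback_code above expands to C++ of roughly the following shape
# (illustrative only -- `aten::tanh` and the lone `self` argument are a made-up example;
# the real expression list comes from translate()):
#
#   if (force_eager_fallback(at::aten::tanh)) {
#     return at::native::call_fallback_fn_symint<&ltc_eager_fallback, ATEN_OP(tanh)>::call(
#         self
#     );
#   }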


@dataclass(frozen=True)
class GenLazyIR(ABC):
    backend_index: BackendIndex
    backend_name: str
    node_base: str
    use_lazy_shape: bool

    @method_with_native_function
    def __call__(self, f: NativeFunctionsGroup | NativeFunction) -> list[str]:
        func = f.functional.func if isinstance(f, NativeFunctionsGroup) else f.func
        metadata = self.backend_index.get_kernel(
            f.functional if isinstance(f, NativeFunctionsGroup) else f
        )
        schema = LazyIrSchema(
            func, symint=metadata is not None and metadata.supports_symint()
        )
        return self.gen(schema)

    # No lowering is generated unless this base class is subclassed and the method is
    # implemented for a specific backend.
    def lowering_function(self, schema: LazyIrSchema) -> str:
        return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        return ""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        return f"""bool CanBeReused({node_ctor_args}) const {{
    return false;
    }}"""

    def node_base_ctor_call(self, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        # Backends can customize the way the node base class constructor is called, as
        # long as all of its arguments can be generated from information in the schema.
        base_ctor_value_args_list = []
        for arg in value_args:
            if isinstance(arg.lazy_type, (BaseCType, VectorCType)):
                base_ctor_value_args_list.append(f"{arg.name}")
            elif isinstance(arg.lazy_type, OptionalCType):
                base_ctor_value_args_list.append(f"{arg.name}.value_or(kNullValue)")
            else:
                raise AssertionError(
                    f"Unsupported type ({arg.lazy_type}) - add support if necessary"
                )
        base_ctor_value_args = ", ".join(base_ctor_value_args_list)

        scalar_args = schema.filtered_args(values=False, scalars=True)

        # Shape construction: conditionally build the shape argument depending on the
        # shape property specified for the schema.
        if schema.properties.ShapePrecompute:
            shape_ctor_arg = "std::move(shapes),"
        elif schema.properties.ShapeCompute:
            shape_args = [a.name for a in value_args]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"compute_shape_{schema.name}({', '.join(shape_args)}),"
        elif schema.properties.ShapeCache:
            shape_args = [f"operand({i})" for i in range(len(value_args))]
            shape_args.extend(a.name for a in scalar_args)
            shape_ctor_arg = f"[&](){{ return compute_shape_{schema.name}({', '.join(shape_args)})[0]; }},"
        else:
            shape_ctor_arg = ""

        scalar_hashes = ", ".join(f"{a.name}" for a in scalar_args)

        return f"""{self.node_base}(
              {schema.node_name}::ClassOpKind(),
              OpList{{{base_ctor_value_args}}},
              {shape_ctor_arg}
              /* num_outputs */ {len(schema.returns)},
              torch::lazy::MHash({scalar_hashes}))"""

    def gen(self, schema: LazyIrSchema) -> list[str]:
        opkind = schema.opkind or aten_symbol(schema)

        # For now, we just want one IR class decl and soon after also the method defs,
        # and we use the functional version, not out/inplace.
        all_args = schema.filtered_args()
        scalar_args = schema.filtered_args(values=False, scalars=True)

        ctor_args = [f"const {i.lazy_type.cpp_type()}& {i.name}" for i in all_args]
        reuse_ctor_args = ", ".join(ctor_args)
        if self.use_lazy_shape and schema.properties.ShapePrecompute:
            ctor_args.append("std::vector<torch::lazy::Shape>&& shapes")
        node_ctor_args = ", ".join(ctor_args)

        scalar_initializers = ",\n        ".join(
            [
                # Special-case the mapping from string_view -> std::string
                f"{a.name}({a.name}.has_value() ? ::std::make_optional(std::string(*{a.name})) : ::std::nullopt)"
                if a.lazy_type.cpp_type() == "::std::optional<c10::string_view>"
                else f"{a.name}({a.name})"
                for a in scalar_args
            ]
        )
        if len(scalar_initializers):
            scalar_initializers = f",\n        {scalar_initializers}"
        scalar_decls = "\n  ".join(
            [
                f"std::string {a.name};"
                if a.lazy_type.cpp_type() == "c10::string_view"
                else f"::std::optional<std::string> {a.name};"
                if a.lazy_type.cpp_type() == "::std::optional<c10::string_view>"
                else f"{a.lazy_type.cpp_type()} {a.name};"
                for a in scalar_args
            ]
        )
        optional_values = [
            arg.name
            for arg in schema.filtered_args(values=True, scalars=False)
            if isinstance(arg.lazy_type, OptionalCType)
        ]
        has_optional_decls = "\n  ".join(
            [f"bool has_{value}: 1;" for value in optional_values]
        )
        has_optional_defs = "\n    ".join(
            [f"has_{value} = !!{value};" for value in optional_values]
        )
        members_to_string = []
        for arg in scalar_args:
            if isinstance(arg.lazy_type, OptionalCType):
                value = f"{arg.name}.value()"
                if arg.is_generator:
                    value = '"torch.Generator()"'
                members_to_string.append(
                    f"""if ({arg.name}.has_value()) {{
      ss << ", {arg.name}=" << {value};
    }} else {{
      ss << ", {arg.name}=null";
    }}"""
                )
            else:
                members_to_string.append(f'ss << ", {arg.name}=" << {arg.name};')
        members_to_string_str = "\n    ".join(members_to_string)

        return [
            f"""\
class {schema.node_name} : public {self.node_base} {{
 public:
  static torch::lazy::OpKind ClassOpKind() {{
    return torch::lazy::OpKind({opkind});
  }}

  {schema.node_name}({node_ctor_args}{scalar_initializers})
      : {self.node_base_ctor_call(schema)}
  {{
    {has_optional_defs}
  }}

  std::string ToString() const override {{
    std::stringstream ss;
    ss << {self.node_base}::ToString();
    {members_to_string_str}
    return ss.str();
  }}

  {self.create_function(schema, reuse_ctor_args)}

  {self.can_be_reused_function(schema, reuse_ctor_args)}

  {self.lowering_function(schema)}

  {scalar_decls}
  {has_optional_decls}
}};

""",
        ]


@dataclass(frozen=True)
class GenTSLazyIR(GenLazyIR):
    def lowering_function(self, schema: LazyIrSchema) -> str:
        signature = """
  torch::lazy::TSOpVector Lower(
      std::shared_ptr<torch::jit::GraphFunction> function,
      torch::lazy::TSLoweringContext* loctx) const override"""

        if schema.properties.LowerDeclOnly:
            return f"{signature};"
        elif schema.properties.Lower:
            return f"""{signature} {{
    {ts_lowering_body(schema)}
  }}
            """
        else:
            return ""

    def create_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"static NodePtr Create({node_ctor_args})"
        if schema.properties.CreateFnDeclOnly:
            return f"{signature};"
        elif not schema.properties.CreateFn:
            return ""
        return f"""{signature} {{
    return ReuseOrMakeNode<{schema.node_name}>(data);
  }}"""

    def can_be_reused_function(self, schema: LazyIrSchema, node_ctor_args: str) -> str:
        signature = f"bool CanBeReused({node_ctor_args}) const"
        if schema.properties.CanBeReusedDeclOnly:
            return f"{signature};"
        elif not schema.properties.CanBeReused:
            return ""
        value_comparison = []
        for arg in itertools.chain(schema.positional_values, schema.keyword_values):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(f"nullable_operand(i++) == {arg.name}")
            else:
                value_comparison.append(f"operand(i++) == {arg.name}")
        for arg in itertools.chain(schema.positional_scalars, schema.keyword_scalars):
            if isinstance(arg.lazy_type, OptionalCType):
                value_comparison.append(
                    f"((!this->{arg.name}&&!{arg.name}) || "
                    f"(this->{arg.name}&&{arg.name} && *(this->{arg.name}) == *{arg.name}))"
                )
            else:
                value_comparison.append(f"this->{arg.name} == {arg.name}")
        value_comparison_str = " &&\n        ".join(value_comparison)

        return f"""{signature} {{
    size_t i = 0;
    return ({value_comparison_str});
  }}"""
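

# Roughly, a GenTSLazyIR instance emits one C++ class per op for LazyIr.h.  For a
# hypothetical single-tensor op the output looks something like the sketch below
# (member lists, shape handling and hashing all vary with the schema; this is not
# verbatim generator output):
#
#   class Tanh : public torch::lazy::TsNode {
#    public:
#     static torch::lazy::OpKind ClassOpKind() {
#       return torch::lazy::OpKind(at::aten::tanh);
#     }
#
#     Tanh(const torch::lazy::Value& self, std::vector<torch::lazy::Shape>&& shapes)
#         : torch::lazy::TsNode(Tanh::ClassOpKind(), OpList{self}, std::move(shapes),
#                               /* num_outputs */ 1, torch::lazy::MHash()) {}
#
#     std::string ToString() const override;
#
#     bool CanBeReused(const torch::lazy::Value& self) const {
#       size_t i = 0;
#       return (operand(i++) == self);
#     }
#
#     torch::lazy::TSOpVector Lower(
#         std::shared_ptr<torch::jit::GraphFunction> function,
#         torch::lazy::TSLoweringContext* loctx) const override;
#   };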


@dataclass(frozen=True)
class GenLazyNativeFuncDefinition:
    class_method_name: str
    backend_index: BackendIndex
    tensor_class: str
    gen_forced_fallback_code: bool
    backend_namespace: str
    get_tensorlist: str
    get_tensor_or_wrap_number: str
    try_get_tensor: str
    metrics_counter: str
    create_tensor: str
    create_from_first_tensor: bool
    create_aten_from_ltc_tensor: str
    tuple_aten_from_ltc_tensors: str
    lazy_tensor_ptr: str
    get_device_fn: str

    def lazy_tensor_decls(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        # Generates lazy_{name} variables for LazyTensors wrapping input tensors
        lazy_tensor_decls: list[str] = []
        for arg in value_args:
            if arg.is_wrapped_scalar:
                if isinstance(arg.lazy_type, OptionalCType):
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = {arg.name} ?
                std::make_optional(torch::lazy::LazyGraphExecutor::Get()->
                    GetIrValueForScalarFromCodegen(*{arg.name}, *common_device)):
                ::std::nullopt;"""
                    )
                else:
                    lazy_tensor_decls.append(
                        f"""auto node_{arg.name} = torch::lazy::LazyGraphExecutor::Get()->
                            GetIrValueForScalarFromCodegen({arg.name}, *common_device);"""
                    )
            elif arg.is_symint_or_list:
                continue  # values are extracted in isValueType
            elif isinstance(arg.lazy_type, BaseCType):
                if arg.lazy_type.type is tensorListValueT:
                    lazy_tensor_decls.append(
                        f"auto lazy_{arg.name}_tensorlist = "
                        f"{self.backend_namespace}::{self.get_tensorlist}({arg.name});"
                    )
                else:
                    lazy_tensor_decls.append(
                        f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                        f"{self.backend_namespace}::{self.get_tensor_or_wrap_number}({arg.name}, *common_device);"
                    )
            elif isinstance(arg.lazy_type, OptionalCType):
                assert arg.lazy_type.elem == BaseCType(getValueT()), arg.lazy_type.elem
                lazy_tensor_decls.append(
                    f"{self.lazy_tensor_ptr} lazy_{arg.name} = "
                    f"{self.backend_namespace}::{self.try_get_tensor}({arg.name}.value_or(at::Tensor()));"
                )
            else:
                raise AssertionError(
                    f"TODO not sure if there are other valid types to handle here ({arg.lazy_type})"
                )
        return ("\n        ").join(lazy_tensor_decls)

    def force_eager_fallback(
        self,
        func: NativeFunction,
        schema: LazyIrSchema,
        metadata: BackendMetadata,
        sig: DispatcherSignature | NativeSignature,
    ) -> str:
        if self.gen_forced_fallback_code:
            return gen_fallback_code(
                schema, sig, overload_name=func.func.name.overload_name
            )
        return ""

    def metrics(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        return f"{self.metrics_counter};"

    def get_device(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        value_args = schema.filtered_args(values=True, scalars=False)
        scalar_args = schema.filtered_args(values=False, scalars=True)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        optional_device = OptionalCType(BaseCType(deviceT))
        optional_devices = [
            a.name for a in scalar_args if a.lazy_type == optional_device
        ]
        assert (
            len(value_types_names) > 0 or len(optional_devices) > 0
        ), "Expected at least one Value or Device type"
        get_device_str = (
            f"{self.get_device_fn}({', '.join(value_types_names + optional_devices)})"
        )
        return f"""auto common_device = {get_device_str};
        TORCH_INTERNAL_ASSERT(common_device);
        """

    def shape_inference(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        all_args = schema.filtered_args()
        returns_length = len(schema.returns)
        # Call the meta kernel if it exists, to compute output shape/dtype for our IR.
        # Structured and view_copy ops have meta functions we can call directly; every
        # other op goes through a hand-written compute_shape_* declaration.
        is_view_copy_op = "view_copy" in func.tags
        is_structured = func.structured or func.structured_delegate is not None
        if is_structured or is_view_copy_op:
            meta_out = """
std::vector<torch::lazy::Shape> shapes{torch::lazy::Shape(out_meta.scalar_type(), out_meta.sizes().vec())};"""
            if returns_length > 1:

                def this_shape(i: int) -> str:
                    return f"torch::lazy::Shape(std::get<{i}>(out_meta).scalar_type(), std::get<{i}>(out_meta).sizes().vec())"

                shapes_str = ",".join([this_shape(i) for i in range(returns_length)])
                meta_out = "std::vector<torch::lazy::Shape> shapes{" + shapes_str + "};"

            # Convert tensor args to the meta device and call it.
            # (We can't pass in the input tensors directly, because they are "functional wrappers".
            # If any of the meta kernels call a tensor op and redispatch, we don't want to hit the functionalize kernels.)
            dispatcher_sig = DispatcherSignature.from_schema(func.func)
            meta_conversion_str, meta_call_ctx = convert_to_meta_tensors(dispatcher_sig)
            meta_call_args = [
                e.expr
                for e in translate(
                    meta_call_ctx, dispatcher_sig.arguments(), method=False
                )
            ]
            if is_view_copy_op:
                # view_copy ops always have a CompositeExplicitAutogradNonFunctional kernel
                assert func.has_composite_explicit_autograd_non_functional_kernel
                dispatch_ns = "compositeexplicitautogradnonfunctional"
            else:
                dispatch_ns = "meta"
            aten_name = schema.aten_name
            if func.func.has_symint() and metadata.supports_symint():
                aten_name += "_symint"
            shape_str = f"""\
        {meta_conversion_str}
        auto out_meta = at::{dispatch_ns}::{aten_name}({", ".join(meta_call_args)});
        {meta_out}"""
        else:
            shape_sig = ComputeShapeSignature(
                metadata.kernel, func, symint=metadata.supports_symint()
            )
            shape_str = f"""
            auto shapes = {shape_sig.shape_call};"""

        shape_str += f"""
            TORCH_INTERNAL_ASSERT(shapes.size() == {returns_length});"""

        # Calculating which dimensions are symbolic
        func_schema_str = "aten::" + str(func.func)
        shape_str += f"""
            if(torch::lazy::symbolicShapeEnabled()){{
                std::vector<torch::jit::IValue> inputs = {{ {", ".join(str(a.name) for a in all_args)} }};
                const char* schema_str = "{func_schema_str}";
                applySymbolicShapesOnLT(schema_str, inputs, shapes);
            }}
        """
        return shape_str

    def build_ir_node(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        node_ctor_input_str = node_ctor_inputs(schema)
        return f"""torch::lazy::NodePtr node = torch::lazy::ReuseNode<{schema.node_name}>({node_ctor_input_str});
        if (!node) {{
            {self.shape_inference(func, schema)}
            node = torch::lazy::MakeNode<{schema.node_name}>({node_ctor_input_str}, std::move(shapes));
            CacheNode(node);
        }}
        """

    def create_lazy_tensor(self, first_tensor_name: str | None = None) -> str:
        # xla uses an instance method for tensor creation, for the time being
        if self.create_from_first_tensor:
            # TODO(whc) remove this if XLA switches to using static method for creation
            assert (
                first_tensor_name is not None
            ), "Requires first tensor to create lazy tensor"
            return f"{first_tensor_name}.{self.create_tensor}"
        return f"{self.tensor_class}::{self.create_tensor}"

    def return_aten_tensor(self, func: NativeFunction, schema: LazyIrSchema) -> str:
        returns_length = len(schema.returns)
        value_args = schema.filtered_args(values=True, scalars=False)
        value_types_names = [f"{a.name}" for a in value_args if not a.is_wrapped_scalar]
        first_tensor_name = value_types_names[0] if len(value_types_names) > 0 else None
        bridge_str = f"""auto result = {self.create_aten_from_ltc_tensor}(
                {self.create_lazy_tensor(first_tensor_name)}(std::move(node), *common_device));"""

        if returns_length > 1:
            assert (
                len(value_types_names) > 0
            ), "Code below assumes there is at least one tensor arg"
            bridge_str = f"""std::vector<{self.lazy_tensor_ptr}> lazy_tensors;
        for (int i = 0; i < {returns_length}; i++) {{
            lazy_tensors.push_back({self.create_lazy_tensor(first_tensor_name)}({getValueT()}(node, i), *common_device));
        }}
        auto result = {self.tuple_aten_from_ltc_tensors}<{returns_length}>(lazy_tensors);"""

        if schema.name.name.inplace or func.func.is_out_fn():
            assert returns_length == 1, (
                "We assumed there was no such case where an op is an in-place variant "
                f"and has tuple outputs, but got tuple of len {returns_length}."
            )
            bridge_str = f"""lazy_{first_tensor_name}->SetInPlaceIrValue(node);
        auto& result = {first_tensor_name};"""

        bridge_str += """
        return result;"""
        return bridge_str

    @method_with_native_function
    def __call__(self, func: NativeFunction) -> list[str]:
        sig = kernel_signature(func, self.backend_index)
        metadata = self.backend_index.get_kernel(func)
        assert metadata is not None
        schema = LazyIrSchema(func.func, symint=metadata.supports_symint())
        return [
            f"""\
    {sig.decl(name=f"{self.class_method_name}::{metadata.kernel}")} {{
        {self.force_eager_fallback(func, schema, metadata, sig)}
        {self.metrics(func, schema)}
        {self.get_device(func, schema)}
        {self.lazy_tensor_decls(func, schema)}
        {self.build_ir_node(func, schema)}
        {self.return_aten_tensor(func, schema)}
    }}\n
    """
        ]
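

# Stitched together by __call__ above, the generated kernel for a simple one-tensor op
# comes out roughly like this (the helper names are supplied by the dataclass fields and
# therefore differ per backend; treat this as an illustrative sketch, not verbatim output):
#
#   at::Tensor LazyNativeFunctions::tanh(const at::Tensor& self) {
#     TORCH_LAZY_FN_COUNTER("lazy::");
#     auto common_device = torch::lazy::GetBackendDevice(self);
#     TORCH_INTERNAL_ASSERT(common_device);
#     LazyTensorPtr lazy_self =
#         torch::lazy::GetLtcTensorOrCreateForWrappedNumber(self, *common_device);
#     torch::lazy::NodePtr node = torch::lazy::ReuseNode<Tanh>(lazy_self->GetIrValue());
#     if (!node) {
#       auto shapes = torch::lazy::compute_shape_tanh(self);
#       TORCH_INTERNAL_ASSERT(shapes.size() == 1);
#       node = torch::lazy::MakeNode<Tanh>(lazy_self->GetIrValue(), std::move(shapes));
#       CacheNode(node);
#     }
#     auto result = torch::lazy::CreateAtenFromLtcTensor(
#         torch::lazy::LazyTensor::Create(std::move(node), *common_device));
#     return result;
#   }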


class ComputeShapeSignature:
    """
    Here we use the base name as the suffix of the signature to avoid generating for in-place variants.
    """

    def __init__(self, kernel_name: str, f: NativeFunction, *, symint: bool) -> None:
        self.__schema = LazyIrSchema(f.func, symint=symint)
        self.__dispatch_args = ", ".join(
            [a.decl() for a in dispatcher.arguments(f.func, symint=symint)]
        )
        self.__call_args = ", ".join(
            [f"{arg.name}" for arg in self.__schema.filtered_args(generator=True)]
        )
        self.__kernel_name = kernel_name

    def __decl_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__dispatch_args})"

    def __call_suffix(self) -> str:
        return f"{self.__kernel_name}({self.__call_args})"

    @property
    def shape_decl(self) -> str:
        return f"TORCH_API std::vector<torch::lazy::Shape> compute_shape_{self.__decl_suffix()}"

    @property
    def shape_call(self) -> str:
        return f"torch::lazy::compute_shape_{self.__call_suffix()}"
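

# For a hypothetical kernel name "tanh" over `aten::tanh(Tensor self) -> Tensor`, the two
# properties above yield strings along these lines (assumed example, not verbatim):
#
#   shape_decl:  TORCH_API std::vector<torch::lazy::Shape> compute_shape_tanh(const at::Tensor & self)
#   shape_call:  torch::lazy::compute_shape_tanh(self)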


@dataclass(frozen=True)
class GenLazyShapeInferenceDefinition:
    backend_index: BackendIndex
    tensor_class: str

    @method_with_native_function
    def __call__(self, f: NativeFunction) -> list[str]:
        metadata = self.backend_index.get_kernel(f)
        assert metadata is not None

        # See shape_inference above: structured and view_copy ops get their shapes from
        # meta kernels, so only the remaining ops need a hand-written shape declaration.
        is_view_copy_op = "view_copy" in f.tags
        is_structured = f.structured or f.structured_delegate is not None
        if is_structured or is_view_copy_op:
            return []
        else:
            shape_sig = ComputeShapeSignature(
                metadata.kernel, f, symint=metadata.supports_symint()
            )
            return ["\n".join([f"{shape_sig.shape_decl};"])]


def generate_non_native_lazy_ir_nodes(
    non_native: list[dict[str, Any]], gen: GenLazyIR
) -> list[str]:
    """Generate the non-native lazy IR node classes"""
    nodes = []
    for op in non_native:
        # Set default properties for non-native IRs
        properties = LazyIrProperties("ShapeCache", "CanBeReused", "LowerFn")
        for p in op.get("properties", []):
            setattr(properties, p, True)

        # non-native ops are assumed to want symint bindings if symint was written
        schema = LazyIrSchema(
            FunctionSchema.parse(op["func"]), properties, symint=True
        )
        schema.opkind = op.get("opkind")
        nodes.append(gen.gen(schema)[0])

    return nodes
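

# A minimal sketch of how a backend codegen driver can wire these generators together;
# the in-tree driver (torchgen/gen_lazy_tensor.py) does considerably more, and the
# constructor arguments below are placeholder values rather than a working configuration:
#
#   ir_gen = GenTSLazyIR(backend_index, "TorchScript", "torch::lazy::TsNode", use_lazy_shape=True)
#   shape_gen = GenLazyShapeInferenceDefinition(backend_index, "torch::lazy::LazyTensor")
#   for fn in native_functions:
#       lazy_ir_lines = ir_gen(fn)        # C++ node class destined for LazyIr.h
#       shape_decl_lines = shape_gen(fn)  # compute_shape_* declarations, if any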