import ast
import contextlib
import inspect
import threading
from collections.abc import Generator, Iterable
from typing import Any, Callable, Optional, Union

from torch.utils._exposed_in import exposed_in

from .custom_ops import custom_op, CustomOpDef
from .infer_schema import infer_schema


# Maps a triton_op's qualified name (e.g. "mylib::add") to the triton kernels
# that were found wrapped inside its implementation.
triton_ops_to_kernels: dict[str, list[object]] = {}


def get_triton_kernels_for_op(name: str) -> list[object]:
    return triton_ops_to_kernels.get(name, [])


def get_inner_triton_kernels(fn: Callable) -> list[object]:
    """Inspect the source of an arbitrary callable passed to
    torch._library.triton_op, and grab all of the triton kernels that are
    wrapped inside of it.

    TODO: This check is best effort. It does *not* handle the case where the
    triton kernel is hidden behind recursive function calls.
    """

    def find_triton_kernels(fn: Callable) -> list[object]:
        try:
            source = inspect.getsource(fn)
        except (OSError, TypeError):
            return []

        from torch._inductor.utils import IndentedBuffer

        # Re-indent the source so that ast.parse accepts source grabbed from a
        # nested function or a method.
        buffer = IndentedBuffer()
        buffer.splice(source, strip=True)
        tree = ast.parse(buffer.getrawvalue())

        class Visitor(ast.NodeVisitor):
            def __init__(self) -> None:
                self.triton_kernels: list[Any] = []

            def visit_Call(self, node: ast.Call) -> None:
                triton_func_names = ("capture_triton", "wrap_triton")
                # Matches qualified calls like torch._library.wrap_triton(kernel).
                if (
                    isinstance(node.func, ast.Attribute)
                    and isinstance(node.func.value, ast.Attribute)
                    and isinstance(node.func.value.value, ast.Name)
                    and node.func.value.value.id == "torch"
                    and node.func.value.attr == "_library"
                    and node.func.attr in triton_func_names
                    and node.args
                    and isinstance(node.args[0], ast.Name)
                ):
                    self.triton_kernels.append(node.args[0].id)
                # Matches bare calls like wrap_triton(kernel) or capture_triton(kernel).
                elif (
                    isinstance(node.func, ast.Name)
                    and node.func.id in triton_func_names
                    and node.args
                    and isinstance(node.args[0], ast.Name)
                ):
                    self.triton_kernels.append(node.args[0].id)
                self.generic_visit(node)

        collector = Visitor()
        collector.visit(tree)

        # Resolve the collected kernel names to the actual kernel objects via
        # the callable's closure, globals, and builtins.
        closure_vars = inspect.getclosurevars(fn)
        resolved = []
        for name in collector.triton_kernels:
            if name in closure_vars.nonlocals:
                resolved.append(closure_vars.nonlocals[name])
            elif name in closure_vars.globals:
                resolved.append(closure_vars.globals[name])
            elif name in closure_vars.builtins:
                resolved.append(closure_vars.builtins[name])
        return resolved

    return find_triton_kernels(fn)


@exposed_in("torch.library")
def triton_op(
    name: str,
    fn: Optional[Callable] = None,
    /,
    *,
    mutates_args: Union[str, Iterable[str]],
    schema: Optional[str] = None,
) -> Callable:
    """Create a custom operator whose implementation is backed by 1+ triton kernels.

    This is a more structured way of using triton kernels with PyTorch.
    Prefer using triton kernels with no ``torch.library`` custom operator wrappers
    (like :func:`torch.library.custom_op`, :func:`torch.library.triton_op`) because
    that is simpler; only use :func:`torch.library.custom_op`/:func:`torch.library.triton_op`
    if you want to create an operator that behaves like PyTorch built-in operators.
    For example, you may use a ``torch.library`` wrapper API to define the behavior
    of the triton kernel when passed a tensor subclass or under a TorchDispatchMode.

    Use :func:`torch.library.triton_op` instead of :func:`torch.library.custom_op`
    when the implementation consists of 1+ triton kernels.
    :func:`torch.library.custom_op` treats custom operators as opaque
    (:func:`torch.compile` and :func:`torch.export.export` will never trace into
    them), but ``triton_op`` makes the implementation visible to these subsystems,
    allowing them to optimize the triton kernel(s).

    Note that ``fn`` must only consist of calls to PyTorch-understood operators
    and triton kernels. Any triton kernels called inside ``fn`` must be wrapped
    in a call to :func:`torch.library.wrap_triton`.

    Args:
        name (str): A name for the custom op that looks like "{namespace}::{name}",
            e.g. "mylib::my_linear". The name is used as the op's stable identifier
            in PyTorch subsystems (e.g. torch.export, FX graphs).
            To avoid name collisions, please use your project name as the namespace;
            e.g. all custom ops in pytorch/fbgemm use "fbgemm" as the namespace.
        mutates_args (Iterable[str] or "unknown"): The names of args that the
            function mutates. This MUST be accurate, otherwise, the behavior is
            undefined. If "unknown", it pessimistically assumes that all inputs
            to the operator are being mutated.
        schema (None | str): A schema string for the operator. If None
            (recommended) we'll infer a schema for the operator from its type
            annotations. We recommend letting us infer a schema unless you have
            a specific reason not to.
            Example: "(Tensor x, int y) -> (Tensor, Tensor)".

    Example::

        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA)
        >>> import torch
        >>> from torch.library import triton_op, wrap_triton
        >>>
        >>> import triton
        >>> from triton import language as tl
        >>>
        >>> @triton.jit
        >>> def add_kernel(
        >>>     in_ptr0,
        >>>     in_ptr1,
        >>>     out_ptr,
        >>>     n_elements,
        >>>     BLOCK_SIZE: "tl.constexpr",
        >>> ):
        >>>     pid = tl.program_id(axis=0)
        >>>     block_start = pid * BLOCK_SIZE
        >>>     offsets = block_start + tl.arange(0, BLOCK_SIZE)
        >>>     mask = offsets < n_elements
        >>>     x = tl.load(in_ptr0 + offsets, mask=mask)
        >>>     y = tl.load(in_ptr1 + offsets, mask=mask)
        >>>     output = x + y
        >>>     tl.store(out_ptr + offsets, output, mask=mask)
        >>>
        >>> @triton_op("mylib::add", mutates_args={})
        >>> def add(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        >>>     output = torch.empty_like(x)
        >>>     n_elements = output.numel()
        >>>
        >>>     def grid(meta):
        >>>         return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        >>>
        >>>     # NB: we need to wrap the triton kernel in a call to wrap_triton
        >>>     wrap_triton(add_kernel)[grid](x, y, output, n_elements, 16)
        >>>     return output
        >>>
        >>> @torch.compile
        >>> def f(x, y):
        >>>     return add(x, y)
        >>>
        >>> x = torch.randn(3, device="cuda")
        >>> y = torch.randn(3, device="cuda")
        >>>
        >>> z = f(x, y)
        >>> assert torch.allclose(z, x + y)

    """

    def dec(fn: Callable[..., object]) -> CustomOpDef:
        def backend_fn(*args, **kwargs):
            # Optimization: when running in eager mode with regular Tensors
            # there is no need to dispatch the triton kernel through the HOP.
            with set_wrap_triton_enabled(False):
                return fn(*args, **kwargs)

        result = custom_op(
            name,
            backend_fn,
            mutates_args=mutates_args,
            schema=infer_schema(fn, mutates_args=mutates_args)
            if schema is None
            else schema,
        )
        from .._subclasses.functional_tensor import FunctionalTensorMode

        # The user's fn is expected to be make_fx traceable, so it doubles as
        # the Fake/meta kernel.
        result.register_fake(fn)

        # Decompose the operator when FunctionalTensorMode is active (i.e. in
        # AOTDispatcher), so that backends like Inductor can see the underlying
        # triton kernel(s) and optimize them directly.
        def functional_decomp(mode, op, types, args, kwargs):
            from torch.export._trace import custom_triton_ops_decomposition_disabled

            if custom_triton_ops_decomposition_disabled():
                return mode.__torch_dispatch__(op, types, args, kwargs)
            else:
                import torch._subclasses

                unrecognized_types = [
                    t
                    for t in types
                    if not issubclass(t, torch._subclasses.FakeTensor)
                    and t
                    not in (
                        torch.Tensor,
                        torch._subclasses.functional_tensor.FunctionalTensor,
                    )
                ]
                if unrecognized_types:
                    # Let another subclass or mode handle these types.
                    return NotImplemented

                with mode:
                    return fn(*args, **kwargs)

        kernels = get_inner_triton_kernels(fn)
        triton_ops_to_kernels[name] = kernels
        result.register_torch_dispatch(FunctionalTensorMode, functional_decomp)
        return result

    if fn is None:
        return dec
    else:
        return dec(fn)


wrap_triton_enabled = threading.local()
wrap_triton_enabled_default = True


@contextlib.contextmanager
def set_wrap_triton_enabled(enabled: bool) -> Generator[None, None, None]:
    """If triton kernels annotated with @wrap_triton should dispatch via HOP
    or go straight to the triton kernel execution.

    We have this switch because eager-mode performance of HOP dispatch is slow
    enough to matter (~1ms) and we know that wrap_triton isn't necessary in
    some situations (eager-mode with regular Tensors)
    """
    try:
        prev = is_wrap_triton_enabled()
        wrap_triton_enabled.value = enabled
        yield
    finally:
        wrap_triton_enabled.value = prev


def is_wrap_triton_enabled() -> bool:
    return getattr(wrap_triton_enabled, "value", wrap_triton_enabled_default)


def capture_triton(triton_kernel: Callable, /) -> Callable:
    """This API has been renamed to wrap_triton"""
    return wrap_triton(triton_kernel)


@exposed_in("torch.library")
def wrap_triton(triton_kernel: Callable, /) -> Callable:
    """Allows capture of a triton kernel into a graph via make_fx or
    non-strict ``torch.export``.
    These technologies perform Dispatcher-based tracing (via
    ``__torch_dispatch__``) and cannot see calls to raw triton kernels.
    The ``wrap_triton`` API wraps a triton kernel into a callable that
    can actually be traced into a graph.

    Please use this API together with :func:`torch.library.triton_op`.

    Examples:

        >>> # xdoctest: +SKIP
        >>> import torch
        >>> import triton
        >>> from triton import language as tl
        >>> from torch.fx.experimental.proxy_tensor import make_fx
        >>> from torch.library import wrap_triton
        >>>
        >>> @triton.jit
        >>> def add_kernel(
        >>>     in_ptr0,
        >>>     in_ptr1,
        >>>     out_ptr,
        >>>     n_elements,
        >>>     BLOCK_SIZE: "tl.constexpr",
        >>> ):
        >>>     pid = tl.program_id(axis=0)
        >>>     block_start = pid * BLOCK_SIZE
        >>>     offsets = block_start + tl.arange(0, BLOCK_SIZE)
        >>>     mask = offsets < n_elements
        >>>     x = tl.load(in_ptr0 + offsets, mask=mask)
        >>>     y = tl.load(in_ptr1 + offsets, mask=mask)
        >>>     output = x + y
        >>>     tl.store(out_ptr + offsets, output, mask=mask)
        >>>
        >>> def add(x, y):
        >>>     output = torch.empty_like(x)
        >>>     n_elements = output.numel()
        >>>
        >>>     def grid_fn(meta):
        >>>         return (triton.cdiv(n_elements, meta["BLOCK_SIZE"]),)
        >>>
        >>>     wrap_triton(add_kernel)[grid_fn](x, y, output, n_elements, 16)
        >>>     return output
        >>>
        >>> x = torch.randn(3, device="cuda")
        >>> y = torch.randn(3, device="cuda")
        >>> gm = make_fx(add)(x, y)
        >>> print(gm.code)
        >>> # def forward(self, x_1, y_1):
        >>> #     empty_like = torch.ops.aten.empty_like.default(x_1, pin_memory = False)
        >>> #     triton_kernel_wrapper_mutation_proxy = triton_kernel_wrapper_mutation(
        >>> #         kernel_idx = 0, constant_args_idx = 0,
        >>> #         grid = [(1, 1, 1)], kwargs = {
        >>> #             'in_ptr0': x_1, 'in_ptr1': y_1, 'out_ptr': empty_like,
        >>> #             'n_elements': 3, 'BLOCK_SIZE': 16
        >>> #         })
        >>> #     return empty_like

    """
    from triton.runtime.autotuner import Autotuner
    from triton.runtime.jit import JITFunction

    from torch._higher_order_ops.triton_kernel_wrap import TraceableTritonKernelWrapper

    if not isinstance(triton_kernel, (JITFunction, Autotuner)):
        raise RuntimeError(
            "wrap_triton only works on functions annotated with triton.jit or triton.autotune"
        )
    if not is_wrap_triton_enabled():
        # HOP dispatch is disabled (e.g. eager mode with regular Tensors), so
        # return the raw kernel and launch it directly.
        return triton_kernel
    return TraceableTritonKernelWrapper(triton_kernel, None, None)
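

# Illustrative sketch (not part of the upstream API surface): how the
# kernel-tracking helpers above can be queried after a triton_op has been
# registered. It assumes an ``add_kernel`` triton kernel and a "mylib::add"
# op defined exactly as in the ``triton_op`` docstring example.
#
#     >>> # xdoctest: +SKIP
#     >>> from torch._library.triton import get_triton_kernels_for_op
#     >>>
#     >>> # Registering the op via @triton_op runs get_inner_triton_kernels on
#     >>> # its implementation, recording any wrap_triton-wrapped kernels.
#     >>> kernels = get_triton_kernels_for_op("mylib::add")
#     >>> assert add_kernel in kernels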