# Python front-end for the distributed RPC layer: rpc_sync / rpc_async / remote,
# the RRef helper class, and agent shutdown / synchronization utilities.
import collections
import contextlib
import functools
import inspect
import logging
import threading
from typing import Any, Dict, Generic, Set, TypeVar

import torch
from torch._C._distributed_rpc import (
    PyRRef,
    RemoteProfilerManager,
    WorkerInfo,
    get_rpc_timeout,
    _cleanup_python_rpc_handler,
    _delete_all_user_and_unforked_owner_rrefs,
    _destroy_rref_context,
    _get_current_rpc_agent,
    _invoke_remote_builtin,
    _invoke_remote_python_udf,
    _invoke_remote_torchscript,
    _invoke_rpc_builtin,
    _invoke_rpc_python_udf,
    _invoke_rpc_torchscript,
    _is_current_rpc_agent_set,
    _reset_current_rpc_agent,
    _set_and_start_rpc_agent,
)
from torch.futures import Future

from ._utils import _group_membership_management, _update_group_membership
from .constants import DEFAULT_SHUTDOWN_TIMEOUT, UNSET_RPC_TIMEOUT
from .internal import (
    PythonUDF,
    RPCExecMode,
    _build_rpc_profiling_key,
    _internal_rpc_pickler,
)

__all__ = [
    "shutdown",
    "get_worker_info",
    "remote",
    "rpc_sync",
    "rpc_async",
    "RRef",
    "AllGatherStates",
    "method_factory",
    "new_method",
]

logger = logging.getLogger(__name__)

# Ignore RRef leaks during shutdown. Without this, applications would have to
# make sure there are no remaining references to any RRef and that Python GC
# has collected them before calling shutdown(), which makes for a bad
# debugging experience, especially in large applications.
_ignore_rref_leak = True
_default_pickler = _internal_rpc_pickler


@contextlib.contextmanager
def _use_rpc_pickler(rpc_pickler):
    r"""
    rpc_pickler: (.internal._InternalRPCPickler) Overrides the default RPC pickler.
    """
    global _default_pickler
    _default_pickler = rpc_pickler
    try:
        yield
    finally:
        _default_pickler = _internal_rpc_pickler


def _require_initialized(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        if not _is_current_rpc_agent_set():
            raise RuntimeError(
                "RPC has not been initialized. Call "
                "torch.distributed.rpc.init_rpc first."
            )
        return func(*args, **kwargs)

    return wrapper


class AllGatherStates:
    def __init__(self):
        # Each `gathered_objects` starts out as an empty dict. The leader is
        # the first worker in the sorted worker name list. Whenever a worker
        # enters `_all_gather()`, it runs `_gather_to_leader()` on the leader
        # to add its own name and data obj to this dict. The leader also adds
        # itself to the dict when calling `_all_gather()`.
        # Once `set(gathered_objects.keys()) == _ALL_WORKER_NAMES`, the leader
        # broadcasts the gathered dict to all follower workers and sets their
        # `gathered_objects` and `proceed_signal` fields.
        self.gathered_objects = {}
        # All workers wait on this signal until the gathered objects arrive.
        self.proceed_signal = threading.Event()


# States used by `def _all_gather()`.
# `_ALL_WORKER_NAMES` is initialized on initializing the RPC layer.
_ALL_WORKER_NAMES: Set[Any] = set()
_all_gather_dict_lock = threading.RLock()
_all_gather_sequence_id: Dict[str, int] = {}
_all_gather_sequence_id_to_states: collections.defaultdict = collections.defaultdict(
    AllGatherStates
)


def _init_rpc_states(agent):
    worker_infos = agent.get_worker_infos()
    global _ALL_WORKER_NAMES
    _ALL_WORKER_NAMES = {worker_info.name for worker_info in worker_infos}

    # NB: backend implementation might have already set the rpc_agent.
    if not _is_current_rpc_agent_set():
        _set_and_start_rpc_agent(agent)


def _gather_to_leader(sequence_id, worker_name, obj, worker_names=None):
    with _all_gather_dict_lock:
        if not worker_names:
            worker_names = _ALL_WORKER_NAMES
            assert (
                worker_name in worker_names
            ), f"{worker_name} is not expected by leader."
        states = _all_gather_sequence_id_to_states[sequence_id]
        assert (
            worker_name not in states.gathered_objects
        ), f"{worker_name} reported intent sequence id {sequence_id} twice."
        states.gathered_objects[worker_name] = obj
        if worker_names == set(states.gathered_objects.keys()):
            states.proceed_signal.set()


def _broadcast_to_followers(sequence_id, objects_map):
    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states[sequence_id]

    assert (
        not states.proceed_signal.is_set()
    ), f"Termination signal sequence id {sequence_id} got set twice."
    states.gathered_objects = objects_map
    states.proceed_signal.set()


_thread_local_var = threading.local()


@contextlib.contextmanager
def _wait_all():
    r"""
    A context manager that collects all futures returned by ``rpc_async`` and
    waits on them at the context manager's exit, relieving the user of needing
    to explicitly call wait.

    Example::
        >>> # xdoctest: +SKIP("distributed")
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> with rpc._wait_all():
        >>>     fut_1 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>>     fut_2 = rpc.rpc_async(dst, torch.add, (torch.ones(2, 2), 1))
        >>> # fut_1 and fut_2 are waited on
    """
    _thread_local_var.future_list = []
    try:
        yield
    finally:
        try:
            torch.futures.wait_all(_thread_local_var.future_list)
        finally:
            del _thread_local_var.future_list


@_require_initialized
def _all_gather(obj, worker_names=None, timeout: float = UNSET_RPC_TIMEOUT):
    r"""
    This is similar to torch.distributed.all_gather(), but is using RPC. It
    picks the worker with the smallest name (alphabetic order) as the leader.
    Then all followers send their data ``obj`` to the leader. After the leader
    has received all, it will broadcast the results back to all followers. This
    function blocks until all workers have received the gathered results.
    """
    if not worker_names:
        assert (
            _ALL_WORKER_NAMES is not None
        ), "`_ALL_WORKER_NAMES` is not initialized for `def _all_gather`."
        worker_names = _ALL_WORKER_NAMES
    leader_name = min(worker_names)

    self_name = _get_current_rpc_agent().get_worker_info().name

    with _all_gather_dict_lock:
        concat_names = "".join(sorted(worker_names))
        sequence_num = _all_gather_sequence_id.get(concat_names, 0)
        _all_gather_sequence_id[concat_names] = sequence_num + 1
        sequence_id = concat_names + str(sequence_num)

    is_leader = leader_name == self_name

    if timeout == UNSET_RPC_TIMEOUT:
        # Timeout for RPC calls is specified by the agent.
        rpc_timeout = get_rpc_timeout()
        # No timeout for the signal.
        signal_timeout = None
    elif timeout == DEFAULT_SHUTDOWN_TIMEOUT:
        # No timeout for RPC.
        rpc_timeout = timeout
        # No timeout for the signal.
        signal_timeout = None
    else:
        # Signal and RPC use the same timeout.
        signal_timeout = rpc_timeout = timeout

    # Phase 1: followers send their object to the leader.
    if is_leader:
        _gather_to_leader(sequence_id, self_name, obj, worker_names)
    else:
        rpc_sync(
            leader_name,
            _gather_to_leader,
            args=(sequence_id, self_name, obj, worker_names),
            timeout=rpc_timeout,
        )

    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states[sequence_id]

    # Timeout is either set by the function parameter or None (indefinite).
    states.proceed_signal.wait(timeout=signal_timeout)

    # Phase 2: the leader broadcasts gathered results to all followers.
    # The leader's signal is the first to be unblocked, after receiving all
    # followers' data objects.
    if is_leader:
        worker_name_to_response_future_dict = {}
        for follower_name in worker_names - {leader_name}:
            fut = rpc_async(
                follower_name,
                _broadcast_to_followers,
                args=(sequence_id, states.gathered_objects),
                timeout=rpc_timeout,
            )
            worker_name_to_response_future_dict[follower_name] = fut

        errors = []
        for follower_name, fut in worker_name_to_response_future_dict.items():
            try:
                fut.wait()
            except RuntimeError as ex:
                errors.append((follower_name, ex))

        if errors:
            raise RuntimeError(
                f"Followers {[e[0] for e in errors]} timed out in _all_gather "
                f"after {rpc_timeout:.2f} seconds. The first exception is {errors[0][1]}"
            )

    # Clean up the states kept under this sequence_id.
    with _all_gather_dict_lock:
        states = _all_gather_sequence_id_to_states.pop(sequence_id)
    return states.gathered_objects


@_require_initialized
def _barrier(worker_names):
    r"""
    Synchronizes local and remote RPC processes.

    This will block until all local and remote RPC processes specified under
    worker_names reach this method to wait for all outstanding work to
    complete.

    Args:
        worker_names (List[str]): The set of workers to synchronize.
    """
    try:
        _all_gather(None, set(worker_names))
    except RuntimeError as ex:
        logger.error("Failed to complete barrier, got error %s", ex)
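
# Usage sketch for the helpers above (hedged; these are private APIs and the
# worker names "worker0"/"worker1" are illustrative only). Leader election is
# simply min() over the worker names, so the same worker leads every round for
# a fixed group.
#
# >>> # xdoctest: +SKIP
# >>> # On every worker, after rpc.init_rpc(...):
# >>> from torch.distributed.rpc import api
# >>> gathered = api._all_gather(torch.ones(1))   # dict: worker name -> obj
# >>> api._barrier({"worker0", "worker1"})        # block until both arrive
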
@_require_initialized
def _wait_all_workers(timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Block until all local and remote RPC processes reach this method and wait
    for all outstanding work to complete. Every RPC process must call this
    method before exit to perform a graceful shutdown. This should be used to
    terminate the RPC framework, and there is no guarantee that the RPC
    framework will work after this method returns.
    """
    try:
        _all_gather(None, timeout=timeout)
    except RuntimeError as ex:
        logger.error(
            "Failed to respond to 'Shutdown Proceed' in time, got error %s", ex
        )
        raise ex


@_require_initialized
def shutdown(graceful=True, timeout=DEFAULT_SHUTDOWN_TIMEOUT):
    r"""
    Perform a shutdown of the RPC agent, and then destroy the RPC agent. This
    stops the local agent from accepting outstanding requests, and shuts down
    the RPC framework by terminating all RPC threads. If ``graceful=True``,
    this will block until all local and remote RPC processes reach this method
    and wait for all outstanding work to complete. Otherwise, if
    ``graceful=False``, this is a local shutdown, and it does not wait for
    other RPC processes to reach this method.

    .. warning::
        For :class:`~torch.futures.Future` objects returned by
        :meth:`~torch.distributed.rpc.rpc_async`, ``future.wait()`` should not
        be called after ``shutdown()``.

    Args:
        graceful (bool): Whether to do a graceful shutdown or not. If True,
            this will 1) wait until there are no pending system messages for
            ``UserRRefs`` and delete them; 2) block until all local and remote
            RPC processes have reached this method and wait for all
            outstanding work to complete.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> # do some work
        >>> result = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(1), 1))
        >>> # ready to shutdown
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> # wait for worker 0 to finish work, and then shutdown.
        >>> rpc.shutdown()
    """
    if graceful:
        try:
            agent = _get_current_rpc_agent()
            # The dynamic-membership branch below is specific to
            # TensorPipeAgent, hence the local import.
            from torch._C._distributed_rpc import TensorPipeAgent

            if not isinstance(agent, TensorPipeAgent) or agent.is_static_group:
                _wait_all_workers(timeout)
                _delete_all_user_and_unforked_owner_rrefs()
                agent.join(shutdown=True, timeout=timeout)
            else:
                # This is a dynamic group, so grab the token for the operation.
                my_worker_info = agent.get_worker_info()
                my_name = my_worker_info.name
                with _group_membership_management(agent.store, my_name, False):
                    all_worker_infos = agent.get_worker_infos()
                    for worker in all_worker_infos:
                        if worker.name != my_name:
                            rpc_sync(
                                worker.name,
                                _update_group_membership,
                                args=(my_worker_info, [], {}, False),
                            )
                    agent.join(shutdown=True, timeout=timeout)
        finally:
            # In case of errors, continue to complete the local shutdown.
            _finalize_shutdown()
    else:
        _finalize_shutdown()


def _finalize_shutdown():
    try:
        # This raises a `TORCH_CHECK()` exception on RRef leak detected.
        _destroy_rref_context(_ignore_rref_leak)
    finally:
        _get_current_rpc_agent().shutdown()
        # Clean up the Python RPC handler here because the cleanup has a
        # Python dependency and assumes the interpreter still exists. Whether
        # or not an RRef leak exception was raised above, this clean-up must
        # run. After this point, Python objects returned from RPC calls can no
        # longer be resolved, and future.wait() should not be called.
        _cleanup_python_rpc_handler()
        _reset_current_rpc_agent()


@_require_initialized
def get_worker_info(worker_name=None):
    r"""
    Get :class:`~torch.distributed.rpc.WorkerInfo` of a given worker name.
    Use this :class:`~torch.distributed.rpc.WorkerInfo` to avoid passing an
    expensive string on every invocation.

    Args:
        worker_name (str): the string name of a worker. If ``None``, return
            the id of the current worker. (default ``None``)

    Returns:
        :class:`~torch.distributed.rpc.WorkerInfo` instance for the given
        ``worker_name`` or :class:`~torch.distributed.rpc.WorkerInfo` of the
        current worker if ``worker_name`` is ``None``.
    """
    if worker_name is not None:
        return _get_current_rpc_agent().get_worker_info(worker_name)
    else:
        return _get_current_rpc_agent().get_worker_info()


def _to_worker_info(to):
    if isinstance(to, WorkerInfo):
        return to
    elif isinstance(to, (str, int)):
        return get_worker_info(to)
    else:
        raise ValueError(f"Cannot get WorkerInfo from name {to}")
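
# Usage sketch (hedged; the worker names below are illustrative): resolve a
# WorkerInfo once with ``get_worker_info`` and reuse it for subsequent calls
# instead of passing the name string every time.
#
# >>> # xdoctest: +SKIP
# >>> import torch
# >>> import torch.distributed.rpc as rpc
# >>> rpc.init_rpc("worker0", rank=0, world_size=2)
# >>> info = rpc.get_worker_info("worker1")
# >>> ret = rpc.rpc_sync(info, torch.add, args=(torch.ones(2), 1))
# >>> rpc.shutdown()
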
@_require_initialized
def _rref_typeof_on_owner(rref, blocking: bool = True):
    rref_type = type(rref.local_value())
    if blocking:
        return rref_type
    else:
        # Wrap the result into a completed Future so that, with
        # blocking=False, a future is returned regardless of whether this
        # call runs on the user or the owner.
        future = Future[type]()
        future.set_result(rref_type)
        return future


@_require_initialized
def _rref_typeof_on_user(
    rref, timeout: float = UNSET_RPC_TIMEOUT, blocking: bool = True
):
    fut = rpc_async(rref.owner(), _rref_typeof_on_owner, args=(rref,), timeout=timeout)
    if blocking:
        return fut.wait()
    else:
        return fut


T = TypeVar("T")
GenericWithOneTypeVar = Generic[T]


try:
    # Combine the implementation class and the type class.
    class RRef(PyRRef, Generic[T]):
        pass

except TypeError:
    # TypeError: metaclass conflict: the metaclass of a derived class must be
    # a (non-strict) subclass of the metaclasses of all its bases.
    class RRefMeta(PyRRef.__class__, GenericWithOneTypeVar.__class__):
        pass

    # Combine the implementation class and the type class.
    class RRef(PyRRef, GenericWithOneTypeVar, metaclass=RRefMeta):
        pass


def method_factory(method_name, docstring):
    def method(self, *args, **kwargs):
        return getattr(super(RRef, self), method_name)(*args, **kwargs)

    if method.__doc__:
        method.__doc__ = docstring

    return method


for method_name, method in inspect.getmembers(PyRRef):
    # Ignore magic methods, except "__str__".
    if method_name.startswith("_") and method_name != "__str__":
        continue

    # Get the docstring of the parent method and reuse it for the
    # corresponding RRef method.
    docstring = getattr(method, "__doc__", None)
    assert docstring is not None, "RRef user-facing methods should all have docstrings."

    # Do surgery on pybind docstrings.
    docstring = docstring.replace(
        "torch.distributed.rpc.PyRRef", "torch.distributed.rpc.RRef"
    )

    # Attach the user-facing RRef method with the modified docstring.
    new_method = method_factory(method_name, docstring)
    setattr(RRef, method_name, new_method)
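
# Usage sketch (hedged; worker names illustrative): the ``RRef`` methods
# generated above simply forward to ``PyRRef`` (e.g. ``to_here``, ``owner``,
# ``local_value``); only their docstrings are rewritten.
#
# >>> # xdoctest: +SKIP
# >>> rref = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
# >>> rref.owner()      # WorkerInfo of "worker1"
# >>> rref.to_here()    # blocking fetch of the result tensor
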
@_require_initialized
def remote(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a remote call to run ``func`` on worker ``to`` and return an
    :class:`~torch.distributed.rpc.RRef` to the result value immediately.
    Worker ``to`` will be the owner of the returned
    :class:`~torch.distributed.rpc.RRef`, and the worker calling ``remote`` is
    a user. The owner manages the global reference count of its
    :class:`~torch.distributed.rpc.RRef`, and the owner
    :class:`~torch.distributed.rpc.RRef` is only destructed when globally
    there are no living references to it.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the
            destination worker.
        func (Callable): a callable function, such as Python callables,
            builtin operators (e.g. :meth:`~torch.add`) and annotated
            TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
            invocation.
        timeout (float, optional): timeout in seconds for this remote call. If
            the creation of this :class:`~torch.distributed.rpc.RRef` on
            worker ``to`` is not successfully processed on this worker within
            this timeout, then the next time there is an attempt to use the
            RRef (such as ``to_here()``), a timeout will be raised indicating
            this failure. A value of 0 indicates an infinite timeout, i.e. a
            timeout error will never be raised. If not provided, the default
            value set during initialization or with ``_set_rpc_timeout`` is
            used.

    Returns:
        A user :class:`~torch.distributed.rpc.RRef` instance to the result
        value. Use the blocking API :meth:`torch.distributed.rpc.RRef.to_here`
        to retrieve the result value locally.

    .. warning::
        The ``remote`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that
        the contents of those tensors stay intact until the returned RRef is
        confirmed by the owner, which can be checked using the
        :meth:`torch.distributed.rpc.RRef.confirmed_by_owner` API.

    .. warning::
        Errors such as timeouts for the ``remote`` API are handled on a
        best-effort basis. This means that when remote calls initiated by
        ``remote`` fail, such as with a timeout error, we take a best-effort
        approach to error handling. This means that errors are handled and set
        on the resulting RRef on an asynchronous basis. If the RRef has not
        been used by the application before this handling (such as ``to_here``
        or fork call), then future uses of the ``RRef`` will appropriately
        raise errors. However, it is possible that the user application will
        use the ``RRef`` before the errors are handled. In this case, errors
        may not be raised as they have not yet been handled.

    Example::

        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref1 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rref2 = rpc.remote("worker1", torch.add, args=(torch.ones(2), 1))
        >>> x = rref1.to_here() + rref2.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>     return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> rref = rpc.remote("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rref.to_here()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_remote")
    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)
    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(
        should_profile, qualified_name, func, RPCExecMode.REMOTE, dst_worker_info
    )

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            rref = _invoke_remote_builtin(
                dst_worker_info, qualified_name, timeout, *args, **kwargs
            )
        elif isinstance(func, torch.jit.ScriptFunction):
            rref = _invoke_remote_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                timeout,
                is_async_exec,
                *args,
                **kwargs,
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            rref = _invoke_remote_python_udf(
                dst_worker_info, pickled_python_udf, tensors, timeout, is_async_exec
            )
        # Attach profiling information.
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            fut = rf._call_end_callbacks_on_future(rref._get_future())
            rref._set_profiling_future(fut)

    return rref


def _invoke_rpc(
    to, func, rpc_type, args=None, kwargs=None, rpc_timeout: float = UNSET_RPC_TIMEOUT
):
    if not callable(func):
        raise TypeError("function should be callable.")

    qualified_name = torch.jit._builtins._find_builtin(func)
    dst_worker_info = _to_worker_info(to)

    should_profile = _get_should_profile()

    ctx_manager = _enable_rpc_profiler(
        should_profile, qualified_name, func, rpc_type, dst_worker_info
    )

    with ctx_manager as rf:
        args = args if args else ()
        kwargs = kwargs if kwargs else {}

        is_async_exec = hasattr(func, "_wrapped_async_rpc_function")

        if is_async_exec:
            wrapped = func._wrapped_async_rpc_function
            if isinstance(wrapped, torch.jit.ScriptFunction):
                func = wrapped

        if qualified_name is not None:
            fut = _invoke_rpc_builtin(
                dst_worker_info, qualified_name, rpc_timeout, *args, **kwargs
            )
        elif isinstance(func, torch.jit.ScriptFunction):
            fut = _invoke_rpc_torchscript(
                dst_worker_info.name,
                torch._jit_internal._qualified_name(func),
                args,
                kwargs,
                rpc_timeout,
                is_async_exec,
            )
        else:
            (pickled_python_udf, tensors) = _default_pickler.serialize(
                PythonUDF(func, args, kwargs)
            )
            fut = _invoke_rpc_python_udf(
                dst_worker_info, pickled_python_udf, tensors, rpc_timeout, is_async_exec
            )
        if should_profile:
            assert torch.autograd._profiler_enabled()
            assert rf is not None
            # Schedule profiling callbacks to run when the future completes.
            # This returns a future that completes only after the original
            # future and its profiling callbacks have completed, so that
            # fut.wait() also completes the profiling callbacks.
            fut = rf._call_end_callbacks_on_future(fut)
    return fut
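
# Usage sketch (hedged): the ``_wrapped_async_rpc_function`` attribute checked
# in ``remote`` and ``_invoke_rpc`` is set by the
# ``@rpc.functions.async_execution`` decorator. Such a function returns a
# Future, and the callee completes the RPC only when that Future completes.
# Worker names are illustrative.
#
# >>> # xdoctest: +SKIP
# >>> @rpc.functions.async_execution
# >>> def async_add(to, x, y):
# >>>     # Return a Future[Tensor] instead of a Tensor.
# >>>     return rpc.rpc_async(to, torch.add, args=(x, y))
# >>>
# >>> ret = rpc.rpc_sync("worker1", async_add, args=("worker2", torch.ones(2), 1))
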
@_require_initialized
def rpc_sync(to, func, args=None, kwargs=None, timeout: float = UNSET_RPC_TIMEOUT):
    r"""
    Make a blocking RPC call to run function ``func`` on worker ``to``. RPC
    messages are sent and received in parallel to execution of Python code.
    This method is thread-safe.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the
            destination worker.
        func (Callable): a callable function, such as Python callables,
            builtin operators (e.g. :meth:`~torch.add`) and annotated
            TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
            invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
            the RPC does not complete in this amount of time, an exception
            indicating it has timed out will be raised. A value of 0 indicates
            an infinite timeout, i.e. a timeout error will never be raised. If
            not provided, the default value set during initialization or with
            ``_set_rpc_timeout`` is used.

    Returns:
        Returns the result of running ``func`` with ``args`` and ``kwargs``.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>     return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> ret = rpc.rpc_sync("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_sync")
    fut = _invoke_rpc(to, func, RPCExecMode.SYNC, args, kwargs, timeout)
    return fut.wait()
@_require_initialized
def rpc_async(to, func, args=None, kwargs=None, timeout=UNSET_RPC_TIMEOUT):
    r"""
    Make a non-blocking RPC call to run function ``func`` on worker ``to``.
    RPC messages are sent and received in parallel to execution of Python
    code. This method is thread-safe. This method will immediately return a
    :class:`~torch.futures.Future` that can be awaited on.

    Args:
        to (str or WorkerInfo or int): name/rank/``WorkerInfo`` of the
            destination worker.
        func (Callable): a callable function, such as Python callables,
            builtin operators (e.g. :meth:`~torch.add`) and annotated
            TorchScript functions.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): a dictionary of keyword arguments for the ``func``
            invocation.
        timeout (float, optional): timeout in seconds to use for this RPC. If
            the RPC does not complete in this amount of time, an exception
            indicating it has timed out will be raised. A value of 0 indicates
            an infinite timeout, i.e. a timeout error will never be raised. If
            not provided, the default value set during initialization or with
            ``_set_rpc_timeout`` is used.

    Returns:
        Returns a :class:`~torch.futures.Future` object that can be waited on.
        When completed, the return value of ``func`` on ``args`` and ``kwargs``
        can be retrieved from the :class:`~torch.futures.Future` object.

    .. warning::
        Using GPU tensors as arguments or return values of ``func`` is not
        supported since we don't support sending GPU tensors over the wire.
        You need to explicitly copy GPU tensors to CPU before using them as
        arguments or return values of ``func``.

    .. warning::
        The ``rpc_async`` API does not copy storages of argument tensors until
        sending them over the wire, which could be done by a different thread
        depending on the RPC backend type. The caller should make sure that
        the contents of those tensors stay intact until the returned
        :class:`~torch.futures.Future` completes.

    Example::
        Make sure that ``MASTER_ADDR`` and ``MASTER_PORT`` are set properly
        on both workers. Refer to :meth:`~torch.distributed.init_process_group`
        API for more details. For example,

        export MASTER_ADDR=localhost
        export MASTER_PORT=5678

        Then run the following code in two different processes:

        >>> # xdoctest: +SKIP
        >>> # On worker 0:
        >>> import torch
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut1 = rpc.rpc_async("worker1", torch.add, args=(torch.ones(2), 3))
        >>> fut2 = rpc.rpc_async("worker1", min, args=(1, 2))
        >>> result = fut1.wait() + fut2.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()

        Below is an example of running a TorchScript function using RPC.

        >>> # On both workers:
        >>> @torch.jit.script
        >>> def my_script_add(tensor: torch.Tensor, scalar: int):
        >>>     return torch.add(tensor, scalar)

        >>> # On worker 0:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker0", rank=0, world_size=2)
        >>> fut = rpc.rpc_async("worker1", my_script_add, args=(torch.ones(2), 3))
        >>> ret = fut.wait()
        >>> rpc.shutdown()

        >>> # On worker 1:
        >>> import torch.distributed.rpc as rpc
        >>> rpc.init_rpc("worker1", rank=1, world_size=2)
        >>> rpc.shutdown()
    """
    torch._C._log_api_usage_once("torch.distributed.rpc_async")
    fut = _invoke_rpc(to, func, RPCExecMode.ASYNC, args, kwargs, timeout)
    if hasattr(_thread_local_var, "future_list"):
        _thread_local_var.future_list.append(fut)
    return fut


def _get_should_profile():
    # The legacy profiler must be enabled; RPC profiling is not supported with
    # the Kineto profiler.
    ActiveProfilerType = torch._C._profiler.ActiveProfilerType
    return (
        torch.autograd._profiler_enabled()
        and torch._C._autograd._profiler_type() == ActiveProfilerType.LEGACY
    )


def _enable_rpc_profiler(
    should_profile, qualified_name, func, rpc_type, dst_worker_info
):
    ctx_manager = contextlib.nullcontext()

    if should_profile:
        # Create an appropriate string representation based on the type of
        # func (builtin, script, python).
        if qualified_name is None:
            func_name = (
                torch._jit_internal._qualified_name(func)
                if isinstance(func, torch.jit.ScriptFunction)
                else func.__qualname__
            )
        else:
            func_name = qualified_name
        # Build the RPC profiling key.
        rpc_profiling_key = _build_rpc_profiling_key(
            rpc_type,
            func_name,
            get_worker_info().name,
            dst_worker_info.name,
        )
        RemoteProfilerManager.set_current_profiling_key(rpc_profiling_key)
        ctx_manager = torch.autograd.profiler.record_function(rpc_profiling_key)

    return ctx_manager
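
# Usage sketch (hedged): RPC calls are only recorded by the autograd profiler
# when the legacy (non-Kineto) profiler is active, as `_get_should_profile`
# checks above. Worker names are illustrative.
#
# >>> # xdoctest: +SKIP
# >>> with torch.autograd.profiler.profile() as prof:
# >>>     rpc.rpc_sync("worker1", torch.add, args=(torch.ones(2), 1))
# >>> print(prof.key_averages().table())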