# mypy: allow-untyped-defs
import copyreg
import difflib
import functools
import io
import os
import pickle
import re
import shutil
import struct
import sys
import tarfile
import tempfile
import threading
import warnings
from contextlib import closing, contextmanager
from enum import Enum
from typing import Any, Callable, cast, Generic, IO, Optional, TypeVar, Union

from typing_extensions import TypeAlias, TypeIs

import torch
import torch._weights_only_unpickler as _weights_only_unpickler
from torch._sources import get_source_lines_and_file
from torch._utils import _import_dotted_name
from torch.storage import _get_dtype_from_pickle_storage_type
from torch.types import FileLike, Storage


__all__ = [
    "SourceChangeWarning",
    "mkdtemp",
    "register_package",
    "check_module_version_greater_or_equal",
    "validate_cuda_device",
    "validate_hpu_device",
    "location_tag",
    "default_restore_location",
    "normalize_storage_type",
    "storage_to_tensor_type",
    "save",
    "load",
    "StorageType",
    "LoadEndianness",
    "get_crc32_options",
    "set_crc32_options",
    "get_default_load_endianness",
    "set_default_load_endianness",
    "get_default_mmap_options",
    "set_default_mmap_options",
    "clear_safe_globals",
    "get_safe_globals",
    "add_safe_globals",
    "safe_globals",
    "get_unsafe_globals_in_checkpoint",
    "skip_data",
]


DEFAULT_PROTOCOL = 2

LONG_SIZE = struct.Struct("=l").size
INT_SIZE = struct.Struct("=i").size
SHORT_SIZE = struct.Struct("=h").size

MAGIC_NUMBER = 0x1950A86A20F9469CFC6C
PROTOCOL_VERSION = 1001
STORAGE_KEY_SEPARATOR = ","

MAP_LOCATION: TypeAlias = Optional[
    Union[Callable[[Storage, str], Storage], torch.device, str, dict[str, str]]
]
STORAGE: TypeAlias = Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage]

IS_WINDOWS = sys.platform == "win32"

UNSAFE_MESSAGE = (
    "In PyTorch 2.6, we changed the default value of the `weights_only` argument in "
    "`torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` "
    "set to `False` will likely succeed, but it can result in arbitrary code execution. "
    "Do it only if you got the file from a trusted source."
)

if not IS_WINDOWS:
    from mmap import MAP_PRIVATE, MAP_SHARED
else:
    MAP_SHARED, MAP_PRIVATE = None, None  # type: ignore[assignment]


def _default_to_weights_only(pickle_module):
    is_fbcode = not hasattr(torch.version, "git_version")
    return pickle_module is None and not is_fbcode


# Thread-local state that must survive across nested serialization calls.
class _SerializationLocal(threading.local):
    def __init__(self):
        super().__init__()
        self.map_location: Optional[MAP_LOCATION] = None
        self.skip_data: bool = False
        self.materialize_fake_tensors: bool = False


_serialization_tls = _SerializationLocal()


class SourceChangeWarning(Warning):
    pass


@contextmanager
def mkdtemp():
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)


_package_registry: list[
    tuple[
        int,  # priority
        Callable[[STORAGE], Optional[str]],  # tagger
        Callable[[STORAGE, str], Optional[STORAGE]],  # deserializer
    ]
] = []


class LoadEndianness(Enum):
    NATIVE = 1
    LITTLE = 2
    BIG = 3


def get_default_load_endianness() -> Optional[LoadEndianness]:
    """
    Get fallback byte order for loading files

    If byteorder mark is not present in saved checkpoint,
    this byte order is used as fallback.
    By default, it's "native" byte order.

    Returns:
        default_load_endian: Optional[LoadEndianness]
    """
    from torch.utils.serialization import config

    return config.load.endianness


def set_default_load_endianness(endianness):
    """
    Set fallback byte order for loading files

    If byteorder mark is not present in saved checkpoint,
    this byte order is used as fallback.
    By default, it's "native" byte order.

    Args:
        endianness: the new fallback byte order
    """
    if not isinstance(endianness, LoadEndianness) and endianness is not None:
        raise TypeError("Invalid argument type in function set_default_load_endianness")
    from torch.utils.serialization import config

    config.load.endianness = endianness
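# Usage sketch (illustrative, not executed on import): forcing the fallback
# byte order before loading an old checkpoint that carries no byteorder
# record. The file name "legacy_be.pt" is hypothetical.
#
#     >>> from torch.serialization import LoadEndianness
#     >>> torch.serialization.set_default_load_endianness(LoadEndianness.LITTLE)
#     >>> state = torch.load("legacy_be.pt", weights_only=True)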
def get_crc32_options() -> bool:
    """
    Get whether :func:`torch.save` computes and writes crc32 for each record.

    Defaults to ``True``.
    """
    from torch.utils.serialization import config

    return config.save.compute_crc32


def set_crc32_options(compute_crc32: bool):
    """
    Set whether :func:`torch.save` computes and writes crc32 for each record.

    .. note::
        Setting this to ``False`` may make unzipping of the ``torch.save`` output
        fail or warn due to corrupted CRC32. However ``torch.load`` will
        be able to load the file.

    Args:
        compute_crc32 (bool): set crc32 computation flag
    """
    from torch.utils.serialization import config

    config.save.compute_crc32 = compute_crc32


def get_default_mmap_options() -> int:
    """
    Get default mmap options for :func:`torch.load` with ``mmap=True``.

    Defaults to ``mmap.MAP_PRIVATE``.

    Returns:
        default_mmap_options: int
    """
    from torch.utils.serialization import config

    return config.load.mmap_flags


def _get_storage_alignment() -> int:
    """
    Gets alignment for storages in torch.save files.

    Defaults to 64.

    Returns:
        storage_alignment: int
    """
    from torch.utils.serialization import config

    return config.save.storage_alignment


class set_default_mmap_options:
    """
    Context manager or function to set default mmap options for :func:`torch.load` with ``mmap=True`` to flags.

    For now, only either ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED`` are supported.
    Please open an issue if you need any other option to be added here.

    .. note::
        This feature is currently not supported for Windows.

    Args:
        flags: ``mmap.MAP_PRIVATE`` or ``mmap.MAP_SHARED``
    """

    def __init__(self, flags: int) -> None:
        if IS_WINDOWS:
            raise RuntimeError(
                "Changing the default mmap options is currently not supported for Windows"
            )
        if flags != MAP_PRIVATE and flags != MAP_SHARED:
            raise ValueError(
                "Invalid argument in function set_default_mmap_options, "
                f"expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}"
            )
        from torch.utils.serialization import config

        self.prev = config.load.mmap_flags
        config.load.mmap_flags = flags

    def __enter__(self) -> None:
        pass

    def __exit__(self, exc_type, exc_value, traceback) -> None:
        from torch.utils.serialization import config

        config.load.mmap_flags = self.prev
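# Usage sketch (illustrative): mapping a checkpoint with shared pages for the
# duration of a single load. Assumes a POSIX system and an existing
# "checkpoint.pt" written by torch.save.
#
#     >>> with torch.serialization.set_default_mmap_options(MAP_SHARED):
#     ...     state = torch.load("checkpoint.pt", mmap=True, weights_only=True)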
def clear_safe_globals() -> None:
    """
    Clears the list of globals that are safe for ``weights_only`` load.
    """
    _weights_only_unpickler._clear_safe_globals()


def get_safe_globals() -> list[Union[Callable, tuple[Callable, str]]]:
    """
    Returns the list of user-added globals that are safe for ``weights_only`` load.
    """
    return _weights_only_unpickler._get_safe_globals()


def add_safe_globals(safe_globals: list[Union[Callable, tuple[Callable, str]]]) -> None:
    """
    Marks the given globals as safe for ``weights_only`` load. For example, functions
    added to this list can be called during unpickling, classes could be instantiated
    and have state set.

    Each item in the list can either be a function/class or a tuple of the form
    (function/class, string) where string is the full path of the function/class.

    Within the serialized format, each function is identified with its full
    path as ``{__module__}.{__qualname__}``. When calling this API, you can provide this
    full path that should match the one in the checkpoint otherwise the default
    ``{fn.__module__}.{fn.__qualname__}`` will be used.

    Args:
        safe_globals (List[Union[Callable, Tuple[Callable, str]]]): list of globals to mark as safe

    Example:
        >>> # xdoctest: +SKIP("Can't torch.save(t, ...) as doctest thinks MyTensor is defined on torch.serialization")
        >>> import tempfile
        >>> class MyTensor(torch.Tensor):
        ...     pass
        >>> t = MyTensor(torch.randn(2, 3))
        >>> with tempfile.NamedTemporaryFile() as f:
        ...     torch.save(t, f.name)
        # Running `torch.load(f.name, weights_only=True)` will fail with
        # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default.
        # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint.
        ...     torch.serialization.add_safe_globals([MyTensor])
        ...     torch.load(f.name, weights_only=True)
        # MyTensor([[-0.5024, -1.8152, -0.5455],
        #          [-0.8234,  2.0500, -0.3657]])
    """
    _weights_only_unpickler._add_safe_globals(safe_globals)


class safe_globals(_weights_only_unpickler._safe_globals):
    r"""Context-manager that adds certain globals as safe for ``weights_only`` load.

    Args:
        safe_globals: List of globals for weights_only load.

    Example:
        >>> # xdoctest: +SKIP("Can't torch.save(t, ...) as doctest thinks MyTensor is defined on torch.serialization")
        >>> import tempfile
        >>> class MyTensor(torch.Tensor):
        ...     pass
        >>> t = MyTensor(torch.randn(2, 3))
        >>> with tempfile.NamedTemporaryFile() as f:
        ...     torch.save(t, f.name)
        # Running `torch.load(f.name, weights_only=True)` will fail with
        # Unsupported global: GLOBAL __main__.MyTensor was not an allowed global by default.
        # Check the code and make sure MyTensor is safe to be used when loaded from an arbitrary checkpoint.
        ...     with torch.serialization.safe_globals([MyTensor]):
        ...         torch.load(f.name, weights_only=True)
        # MyTensor([[-0.5024, -1.8152, -0.5455],
        #          [-0.8234,  2.0500, -0.3657]])
        >>> assert torch.serialization.get_safe_globals() == []
    """


def get_unsafe_globals_in_checkpoint(f: FileLike) -> list[str]:
    """Returns a list of strings of functions/classes in a ``torch.save`` object that are not safe for ``weights_only``.

    For a given function or class ``f``, the corresponding string will be of the form
    ``{f.__module__}.{f.__name__}``.

    This function will return any GLOBALs in the checkpoint that are not in the set marked safe
    for ``weights_only`` (either via :func:`add_safe_globals` or :class:`safe_globals` context or
    allowlisted by ``torch`` by default).

    .. note::
        This function will statically disassemble the pickle file in the checkpoint.
        The implication is any classes dynamically pushed onto the stack during unpickling
        will not be included in the output.

    Args:
        f: File-like object or string containing the checkpoint object saved via ``torch.save``

    Returns:
        A list of strings of pickle GLOBALs in the checkpoint that are not allowlisted for ``weights_only``.
    """
    default_safe_globals_strings = set(
        _weights_only_unpickler._get_allowed_globals().keys()
    )
    user_safe_global_strings = set(
        _weights_only_unpickler._get_user_allowed_globals().keys()
    )
    safe_global_strings = default_safe_globals_strings.union(user_safe_global_strings)

    with _open_file_like(f, "rb") as opened_file:
        if not _is_zipfile(opened_file):
            raise ValueError("Expected input to be a checkpoint returned by torch.save")
        with _open_zipfile_reader(opened_file) as zip_file:
            if _is_torchscript_zip(zip_file):
                raise ValueError(
                    "Expected input to be a checkpoint returned by torch.save "
                    "but got a torchscript checkpoint"
                )
            data_file = io.BytesIO(zip_file.get_record("data.pkl"))
            all_globals = _weights_only_unpickler.get_globals_in_pkl(data_file)
            return list(all_globals.difference(safe_global_strings))
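# Usage sketch (illustrative; `mylib.MyConfig` is a hypothetical trusted
# class): audit a checkpoint first, then allowlist exactly what it needs.
#
#     >>> torch.serialization.get_unsafe_globals_in_checkpoint("ckpt.pt")
#     ['mylib.MyConfig']
#     >>> from mylib import MyConfig
#     >>> torch.serialization.add_safe_globals([MyConfig])
#     >>> obj = torch.load("ckpt.pt", weights_only=True)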
class skip_data:
    """
    Context-manager that skips writing/reading storage bytes for ``torch.save`` / ``torch.load`` calls.

    For the save path, storages will still be saved, but the space that their bytes would usually be written to
    will be empty space. The storage bytes can then be populated in a separate pass.

    For the load path, tensors will be loaded per the checkpoint but their storages will not be populated with data.

    .. warning::
        The ``skip_data`` context manager is an early prototype and is subject to change.

    Args:
        materialize_fake_tensors: Whether to materialize FakeTensors during save. This is a no-op for the load path.

    Example:
        >>> # xdoctest: +SKIP("NamedTemporaryFile on Windows")
        >>> import tempfile
        >>> t = torch.randn(2, 3)
        >>> with tempfile.NamedTemporaryFile() as f:
        ...     with torch.serialization.skip_data():
        ...         torch.save(t, f.name)
        ...     torch.load(f.name, weights_only=True)
        tensor([[0., 0., 0.],
                [0., 0., 0.]])
    """

    def __init__(self, materialize_fake_tensors: bool = False):
        self.materialize_fake_tensors = materialize_fake_tensors

    def __enter__(self):
        self._old_skip_data = _serialization_tls.skip_data
        self._old_materialize_fake_tensors = (
            _serialization_tls.materialize_fake_tensors
        )
        _serialization_tls.skip_data = True
        _serialization_tls.materialize_fake_tensors = self.materialize_fake_tensors

    def __exit__(self, type, value, tb):
        _serialization_tls.skip_data = self._old_skip_data
        _serialization_tls.materialize_fake_tensors = (
            self._old_materialize_fake_tensors
        )


def _is_zipfile(f) -> bool:
    # This is a stricter implementation than zipfile.is_zipfile().
    # zipfile.is_zipfile() is True if the magic number appears anywhere in the
    # binary. Since we expect the files here to be generated by torch.save or
    # torch.jit.save, it's safe to only check the start bytes and avoid
    # collisions and assume the zip has only 1 file.
    start = f.tell()

    local_header_magic_number = b"PK\x03\x04"
    read_bytes = f.read(len(local_header_magic_number))
    f.seek(start)
    return read_bytes == local_header_magic_number


def register_package(
    priority: int,
    tagger: Callable[[STORAGE], Optional[str]],
    deserializer: Callable[[STORAGE, str], Optional[STORAGE]],
):
    """
    Registers callables for tagging and deserializing storage objects with an associated priority.
    Tagging associates a device with a storage object at save time while deserializing moves a
    storage object to an appropriate device at load time. :attr:`tagger` and :attr:`deserializer`
    are run in the order given by their :attr:`priority` until a tagger/deserializer returns a
    value that is not `None`.

    To override the deserialization behavior for a device in the global registry, one can register a
    tagger with a higher priority than the existing tagger.

    This function can also be used to register a tagger and deserializer for new devices.

    Args:
        priority: Indicates the priority associated with the tagger and deserializer, where a lower
            value indicates higher priority.
        tagger: Callable that takes in a storage object and returns its tagged device as a string
            or None.
        deserializer: Callable that takes in storage object and a device string and returns a storage
            object on the appropriate device or None.

    Returns:
        `None`

    Example:
        >>> def ipu_tag(obj):
        >>>     if obj.device.type == 'ipu':
        >>>         return 'ipu'
        >>> def ipu_deserialize(obj, location):
        >>>     if location.startswith('ipu'):
        >>>         ipu = getattr(torch, "ipu", None)
        >>>         assert ipu is not None, "IPU device module is not loaded"
        >>>         assert torch.ipu.is_available(), "ipu is not available"
        >>>         return obj.ipu(location)
        >>> torch.serialization.register_package(11, ipu_tag, ipu_deserialize)
    """
    queue_elem = (priority, tagger, deserializer)
    _package_registry.append(queue_elem)
    _package_registry.sort()


def check_module_version_greater_or_equal(
    module,
    req_version_tuple,
    error_if_malformed=True,
):
    """
    Check if a module's version satisfies requirements

    Usually, a module's version string will be like 'x.y.z', which would be represented
    as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version
    string does not match the given tuple's format up to the length of the tuple, then
    error and exit or emit a warning.

    Args:
        module: the module to check the version of
        req_version_tuple: tuple (usually of ints) representing the required version
        error_if_malformed: whether we should exit if module version string is malformed

    Returns:
        requirement_is_met: bool
    """
    try:
        version_strs = module.__version__.split(".")
        # Cast module version fields to match the types of the required version
        module_version = tuple(
            type(req_field)(version_strs[idx])
            for idx, req_field in enumerate(req_version_tuple)
        )
        requirement_is_met = module_version >= req_version_tuple

    except Exception as e:
        message = (
            f"'{module.__name__}' module version string is malformed "
            f"'{module.__version__}' and cannot be compared with tuple "
            f"{str(req_version_tuple)}"
        )
        if error_if_malformed:
            raise RuntimeError(message) from e
        else:
            warnings.warn(
                message + ", but continuing assuming that requirement is met"
            )
            requirement_is_met = True

    return requirement_is_met
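# Usage sketch (assumes numpy is installed and exposes __version__):
#
#     >>> import numpy
#     >>> check_module_version_greater_or_equal(numpy, (1, 0))
#     True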
def _cpu_tag(obj):
    if obj.device.type == "cpu":
        return "cpu"


def _mps_tag(obj):
    if obj.device.type == "mps":
        return "mps"


def _meta_tag(obj):
    if obj.device.type == "meta":
        return "meta"


def _backend_tag(backend_name, obj):
    if backend_name == "privateuse1":
        backend_name = torch._C._get_privateuse1_backend_name()
    if obj.device.type == backend_name:
        if obj.device.index is None:
            return backend_name
        else:
            return backend_name + ":" + str(obj.device.index)


def _cpu_deserialize(obj, location):
    if location == "cpu":
        return obj


def _mps_deserialize(obj, location):
    if location.startswith("mps"):
        return obj.mps()


def _meta_deserialize(obj, location):
    if location == "meta":
        return torch.UntypedStorage(obj.nbytes(), device="meta")


def _validate_device(location, backend_name):
    """
    Check whether the device index of specified backend is valid

    In case of privateuse1 backend, you must first register a device_module for
    privateuse1 using torch._register_device_module. Implement the following
    methods in device_module like cuda:
    device_module._utils._get_device_index(location, True),
    device_module.device_count().

    Args:
        location: string of device
        backend_name: the backend name or the name of privateuse1, which can be renamed

    Returns:
        device_index: int
    """
    if not hasattr(torch, backend_name):
        raise RuntimeError(
            f"The {backend_name.upper()} device module is not registered. "
            "If you are running on a CPU-only machine, "
            "please use torch.load with map_location=torch.device('cpu') "
            "to map your storages to the CPU."
        )
    device_module = getattr(torch, backend_name)
    if hasattr(device_module, "_utils") and hasattr(
        device_module._utils, "_get_device_index"
    ):
        device_index = device_module._utils._get_device_index(location, True)
        device = torch.device(backend_name, device_index)
    else:
        device = torch.device(location)
        device_index = device.index if device.index else 0
    if hasattr(device_module, "is_available") and not device_module.is_available():
        raise RuntimeError(
            f"Attempting to deserialize object on a {backend_name.upper()} "
            f"device but torch.{backend_name}.is_available() is False. "
            "If you are running on a CPU-only machine, "
            "please use torch.load with map_location=torch.device('cpu') "
            "to map your storages to the CPU."
        )
    if hasattr(device_module, "device_count"):
        device_count = device_module.device_count()
        if device_index >= device_count:
            raise RuntimeError(
                f"Attempting to deserialize object on {backend_name.upper()} device "
                f"{device_index} but torch.{backend_name}.device_count() is "
                f"{device_count}. Please use torch.load with map_location to map "
                "your storages to an existing device."
            )
    return device


def validate_cuda_device(location):
    return _validate_device(location, "cuda").index


def validate_hpu_device(location):
    return _validate_device(location, "hpu").index


def _deserialize(backend_name, obj, location):
    if backend_name == "privateuse1":
        backend_name = torch._C._get_privateuse1_backend_name()
    if location.startswith(backend_name):
        device = _validate_device(location, backend_name)
        return obj.to(device=device)


register_package(10, _cpu_tag, _cpu_deserialize)
register_package(
    20, functools.partial(_backend_tag, "cuda"), functools.partial(_deserialize, "cuda")
)
register_package(21, _mps_tag, _mps_deserialize)
register_package(22, _meta_tag, _meta_deserialize)
register_package(
    23,
    functools.partial(_backend_tag, "privateuse1"),
    functools.partial(_deserialize, "privateuse1"),
)
register_package(
    24, functools.partial(_backend_tag, "hpu"), functools.partial(_deserialize, "hpu")
)
register_package(
    25, functools.partial(_backend_tag, "xpu"), functools.partial(_deserialize, "xpu")
)


def location_tag(
    storage: Union[Storage, torch.storage.TypedStorage, torch.UntypedStorage],
):
    for _, tagger, _ in _package_registry:
        location = tagger(storage)
        if location:
            return location
    raise RuntimeError(
        "don't know how to determine data location of " + torch.typename(storage)
    )


def default_restore_location(storage: STORAGE, location: str) -> STORAGE:
    """
    Restores `storage` using a deserializer function registered for the `location`.

    This function looks in the registry for deserializer functions that match the `location`.
    If found, it attempts to use them, in priority order, to restore `storage` until one
    returns a not `None` result. If no deserializer can be found in the registry, or all found fail
    to bear a result, it raises a `RuntimeError`.

    Args:
        storage (STORAGE): the storage object to restore
        location (str): the location tag associated with the storage object

    Returns:
        storage: Optional[STORAGE]

    Raises:
        RuntimeError: If no deserializer matching `location` is found in the registry or if
            all matching ones return `None`.
    """
    for _, _, fn in _package_registry:
        result = fn(storage, location)
        if result is not None:
            return result
    raise RuntimeError(
        "don't know how to restore data location of "
        + torch.typename(storage)
        + " (tagged with "
        + location
        + ")"
    )
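# Round-trip sketch of the registry above (CPU is always available, so the
# priority-10 tagger/deserializer pair handles it):
#
#     >>> s = torch.tensor([1.0]).untyped_storage()
#     >>> location_tag(s)
#     'cpu'
#     >>> default_restore_location(s, "cpu") is s
#     True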
def normalize_storage_type(storage_type):
    return getattr(torch, storage_type.__name__)


def storage_to_tensor_type(storage):
    storage_type = type(storage)
    module = _import_dotted_name(storage_type.__module__)
    return getattr(module, storage_type.__name__.replace("Storage", "Tensor"))


def _is_path(name_or_buffer) -> TypeIs[Union[str, os.PathLike]]:
    return isinstance(name_or_buffer, (str, os.PathLike))


T = TypeVar("T")


class _opener(Generic[T]):
    def __init__(self, file_like: T) -> None:
        self.file_like: T = file_like

    def __enter__(self):
        return self.file_like

    def __exit__(self, *args):
        pass


class _open_file(_opener[IO[bytes]]):
    def __init__(self, name: Union[str, os.PathLike[str]], mode: str) -> None:
        super().__init__(open(name, mode))

    def __exit__(self, *args):
        self.file_like.close()


class _open_buffer_reader(_opener[IO[bytes]]):
    def __init__(self, buffer: IO[bytes]) -> None:
        super().__init__(buffer)
        _check_seekable(buffer)


class _open_buffer_writer(_opener[IO[bytes]]):
    def __exit__(self, *args):
        self.file_like.flush()


def _open_file_like(name_or_buffer: FileLike, mode: str) -> _opener[IO[bytes]]:
    if _is_path(name_or_buffer):
        return _open_file(name_or_buffer, mode)
    else:
        if "w" in mode:
            return _open_buffer_writer(name_or_buffer)
        elif "r" in mode:
            return _open_buffer_reader(name_or_buffer)
        else:
            raise RuntimeError(f"Expected 'r' or 'w' in mode but got {mode}")


class _open_zipfile_reader(_opener[torch._C.PyTorchFileReader]):
    def __init__(self, name_or_buffer: Union[str, IO[bytes]]) -> None:
        super().__init__(torch._C.PyTorchFileReader(name_or_buffer))


class _open_zipfile_writer_file(_opener[torch._C.PyTorchFileWriter]):
    def __init__(self, name: str) -> None:
        self.file_stream = None
        self.name = str(name)
        try:
            self.name.encode("ascii")
        except UnicodeEncodeError:
            # PyTorchFileWriter only supports ascii filenames.
            # For filenames with non-ascii characters, we rely on Python
            # for writing out the file.
            self.file_stream = io.FileIO(self.name, mode="w")
            super().__init__(
                torch._C.PyTorchFileWriter(
                    self.file_stream, get_crc32_options(), _get_storage_alignment()
                )
            )
        else:
            super().__init__(
                torch._C.PyTorchFileWriter(
                    self.name, get_crc32_options(), _get_storage_alignment()
                )
            )

    def __exit__(self, *args) -> None:
        self.file_like.write_end_of_file()
        if self.file_stream is not None:
            self.file_stream.close()


class _open_zipfile_writer_buffer(_opener[torch._C.PyTorchFileWriter]):
    def __init__(self, buffer: IO[bytes]) -> None:
        if not callable(getattr(buffer, "write", None)):
            msg = f"Buffer of {str(type(buffer)).strip('<>')} has no callable attribute 'write'"
            if not hasattr(buffer, "write"):
                raise AttributeError(msg)
            raise TypeError(msg)
        self.buffer = buffer
        super().__init__(
            torch._C.PyTorchFileWriter(
                buffer, get_crc32_options(), _get_storage_alignment()
            )
        )

    def __exit__(self, *args) -> None:
        self.file_like.write_end_of_file()
        self.buffer.flush()


def _open_zipfile_writer(name_or_buffer):
    container: type[_opener]
    if _is_path(name_or_buffer):
        container = _open_zipfile_writer_file
    else:
        container = _open_zipfile_writer_buffer
    return container(name_or_buffer)


def _is_compressed_file(f) -> bool:
    compress_modules = ["gzip"]
    try:
        return f.__module__ in compress_modules
    except AttributeError:
        return False


def _should_read_directly(f):
    """
    Checks if f is a file that should be read directly. It should be read
    directly if it is backed by a real file (has a fileno) and is not a
    compressed file (e.g. gzip)
    """
    if _is_compressed_file(f):
        return False
    try:
        return f.fileno() >= 0
    except io.UnsupportedOperation:
        return False
    except AttributeError:
        return False


def _check_seekable(f) -> bool:
    def raise_err_msg(patterns, e):
        for p in patterns:
            if p in str(e):
                msg = (
                    str(e)
                    + ". You can only torch.load from a file that is seekable."
                    + " Please pre-load the data into a buffer like io.BytesIO and"
                    + " try to load from it instead."
                )
                raise type(e)(msg)
        raise e

    try:
        f.seek(f.tell())
        return True
    except (io.UnsupportedOperation, AttributeError) as e:
        raise_err_msg(["seek", "tell"], e)
    return False


def _check_dill_version(pickle_module) -> None:
    """Checks if using dill as the pickle module, and if so, checks if it is the correct version.
    If dill version is lower than 0.3.1, a ValueError is raised.

    Args:
        pickle_module: module used for pickling metadata and objects
    """
    if pickle_module is not None and pickle_module.__name__ == "dill":
        required_dill_version = (0, 3, 1)
        if not check_module_version_greater_or_equal(
            pickle_module, required_dill_version, False
        ):
            raise ValueError(
                "'torch' supports dill >= {}, but you have dill {}. "
                "Please upgrade dill or switch to 'pickle'".format(
                    ".".join([str(num) for num in required_dill_version]),
                    pickle_module.__version__,
                )
            )


def _check_save_filelike(f):
    if not _is_path(f) and not hasattr(f, "write"):
        raise AttributeError(
            "expected 'f' to be string, path, or a file-like object "
            "with a 'write' attribute"
        )


def save(
    obj: object,
    f: FileLike,
    pickle_module: Any = pickle,
    pickle_protocol: int = DEFAULT_PROTOCOL,
    _use_new_zipfile_serialization: bool = True,
    _disable_byteorder_record: bool = False,
) -> None:
    """save(obj, f, pickle_module=pickle, pickle_protocol=2, _use_new_zipfile_serialization=True)

    Saves an object to a disk file.

    See also: :ref:`saving-loading-tensors`

    See :ref:`layout-control` for more advanced tools to manipulate a checkpoint.

    Args:
        obj: saved object
        f: a file-like object (has to implement write and flush) or a string or
           os.PathLike object containing a file name
        pickle_module: module used for pickling metadata and objects
        pickle_protocol: can be specified to override the default protocol

    .. note::
        A common PyTorch convention is to save tensors using .pt file extension.

    .. note::
        PyTorch preserves storage sharing across serialization. See
        :ref:`preserve-storage-sharing` for more details.

    .. note::
        The 1.6 release of PyTorch switched ``torch.save`` to use a new
        zipfile-based file format. ``torch.load`` still retains the ability to
        load files in the old format. If for any reason you want ``torch.save``
        to use the old format, pass the kwarg ``_use_new_zipfile_serialization=False``.

    Example:
        >>> # xdoctest: +SKIP("makes cwd dirty")
        >>> # Save to file
        >>> x = torch.tensor([0, 1, 2, 3, 4])
        >>> torch.save(x, "tensor.pt")
        >>> # Save to io.BytesIO buffer
        >>> buffer = io.BytesIO()
        >>> torch.save(x, buffer)
    """
    torch._C._log_api_usage_once("torch.save")
    _check_dill_version(pickle_module)
    _check_save_filelike(f)

    if isinstance(f, (str, os.PathLike)):
        f = os.fspath(f)

    if _use_new_zipfile_serialization:
        with _open_zipfile_writer(f) as opened_zipfile:
            _save(
                obj,
                opened_zipfile,
                pickle_module,
                pickle_protocol,
                _disable_byteorder_record,
            )
            return
    else:
        if _serialization_tls.skip_data:
            raise RuntimeError(
                "Cannot use skip_data=True with _use_new_zipfile_serialization=False"
            )
        with _open_file_like(f, "wb") as opened_file:
            _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
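# Format sketch (illustrative): the default path above writes a zip archive,
# which the private probe `_is_zipfile` recognizes by its local-header magic.
#
#     >>> buffer = io.BytesIO()
#     >>> torch.save(torch.ones(1), buffer)
#     >>> buffer.seek(0)
#     0
#     >>> _is_zipfile(buffer)
#     True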
def _legacy_save(obj, f, pickle_module, pickle_protocol) -> None:
    import torch.nn as nn

    serialized_container_types = {}
    serialized_storages = {}

    # Since loading storages that view the same data with different dtypes is
    # not supported, we need to keep track of the dtype associated with each
    # storage data_ptr and throw an error if the dtype is ever different.
    storage_dtypes: dict[int, torch.dtype] = {}

    def persistent_id(obj: Any) -> Optional[tuple]:
        # FIXME: the docs say that persistent_id should only return a string,
        # but torch stores tuples. This works only in the binary protocol; see
        # https://docs.python.org/2/library/pickle.html#pickling-and-unpickling-external-objects
        if isinstance(obj, type) and issubclass(obj, nn.Module):
            if obj in serialized_container_types:
                return None
            serialized_container_types[obj] = True
            source_file = source = None
            try:
                source_lines, _, source_file = get_source_lines_and_file(obj)
                source = "".join(source_lines)
            except Exception:
                # Saving the source is optional, so we can ignore any errors.
                warnings.warn(
                    "Couldn't retrieve source code for container of "
                    "type " + obj.__name__ + ". It won't be checked "
                    "for correctness upon loading."
                )
            return ("module", obj, source_file, source)

        if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
            storage: torch.UntypedStorage

            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = obj._untyped_storage
                storage_dtype = obj.dtype
                storage_type_str = obj._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                dtype = obj.dtype
                storage_numel = obj._size()
            elif isinstance(obj, torch.UntypedStorage):
                storage = obj
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(obj))
                dtype = torch.uint8
                storage_numel = storage.nbytes()
            else:
                raise TypeError(f"type not recognized: {type(obj)}")

            # If storage is allocated, ensure that any other saved storages
            # pointing to the same data all have the same dtype. If storage is
            # not allocated, don't perform this check.
            if storage.data_ptr() != 0:
                if storage.data_ptr() in storage_dtypes:
                    if storage_dtype != storage_dtypes[storage.data_ptr()]:
                        raise RuntimeError(
                            "Cannot save multiple tensors or storages that "
                            "view the same data as different types"
                        )
                else:
                    storage_dtypes[storage.data_ptr()] = storage_dtype

            view_metadata: Optional[tuple[str, int, int]]

            # Offset is always 0, but we keep it for backwards compatibility
            # with the old serialization format (which supported storage views).
            offset = 0
            storage_key = str(storage._cdata)
            location = location_tag(storage)

            if storage_key not in serialized_storages:
                serialized_storages[storage_key] = (storage, dtype)
            is_view = storage._cdata != storage._cdata
            if is_view:
                view_metadata = (str(storage._cdata), offset, storage.nbytes())
            else:
                view_metadata = None

            res = (
                "storage",
                storage_type,
                storage_key,
                location,
                storage_numel,
                view_metadata,
            )
            return res
        return None

    sys_info = dict(
        protocol_version=PROTOCOL_VERSION,
        little_endian=sys.byteorder == "little",
        type_sizes=dict(
            short=SHORT_SIZE,
            int=INT_SIZE,
            long=LONG_SIZE,
        ),
    )

    class PyTorchLegacyPickler(pickle_module.Pickler):
        def persistent_id(self, obj):
            return persistent_id(obj)

    pickle_module.dump(MAGIC_NUMBER, f, protocol=pickle_protocol)
    pickle_module.dump(PROTOCOL_VERSION, f, protocol=pickle_protocol)
    pickle_module.dump(sys_info, f, protocol=pickle_protocol)

    pickler = PyTorchLegacyPickler(f, protocol=pickle_protocol)
    pickler.dump(obj)

    serialized_storage_keys = sorted(serialized_storages.keys())
    pickle_module.dump(serialized_storage_keys, f, protocol=pickle_protocol)
    f.flush()
    for key in serialized_storage_keys:
        storage, dtype = serialized_storages[key]
        storage._write_file(
            f, _should_read_directly(f), True, torch._utils._element_size(dtype)
        )
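# Stream-layout sketch for the legacy format written above (assumes
# "legacy.pt" was produced with _use_new_zipfile_serialization=False): three
# pickle dumps precede the pickled object and the raw storage bytes.
#
#     >>> with open("legacy.pt", "rb") as fh:
#     ...     assert pickle.load(fh) == MAGIC_NUMBER
#     ...     assert pickle.load(fh) == PROTOCOL_VERSION
#     ...     sys_info = pickle.load(fh)  # protocol version, endianness, type sizes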
def _save(obj, zip_file, pickle_module, pickle_protocol, _disable_byteorder_record):
    serialized_storages = {}
    id_map: dict[int, str] = {}

    # Since loading storages that view the same data with different dtypes is
    # not supported, we need to keep track of the dtype associated with each
    # storage data_ptr and throw an error if the dtype is ever different.
    storage_dtypes: dict[int, torch.dtype] = {}

    def persistent_id(obj):
        if isinstance(obj, torch.storage.TypedStorage) or torch.is_storage(obj):
            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, this case
                # can be deleted
                storage = obj._untyped_storage
                storage_dtype = obj.dtype
                storage_type_str = obj._pickle_storage_type()
                storage_type = getattr(torch, storage_type_str)
                storage_numel = obj._size()
            else:
                storage = obj
                storage_dtype = torch.uint8
                storage_type = normalize_storage_type(type(obj))
                storage_numel = storage.nbytes()

            # If storage is allocated, ensure that any other saved storages
            # pointing to the same data all have the same dtype. If storage is
            # not allocated, don't perform this check.
            if str(storage.device) != "meta" and storage.data_ptr() != 0:
                if storage.data_ptr() in storage_dtypes:
                    if storage_dtype != storage_dtypes[storage.data_ptr()]:
                        raise RuntimeError(
                            "Cannot save multiple tensors or storages that "
                            "view the same data as different types"
                        )
                else:
                    storage_dtypes[storage.data_ptr()] = storage_dtype

            storage_key = id_map.setdefault(storage._cdata, str(len(id_map)))
            if hasattr(obj, "_fake_device") and obj._fake_device is not None:
                location = str(obj._fake_device)
            else:
                location = location_tag(storage)
            serialized_storages[storage_key] = storage

            return ("storage", storage_type, storage_key, location, storage_numel)

        return None

    # Write the pickle data for `obj`
    data_buf = io.BytesIO()

    class PyTorchPickler(pickle_module.Pickler):  # type: ignore[name-defined]
        def persistent_id(self, obj):
            return persistent_id(obj)

    pickler = PyTorchPickler(data_buf, protocol=pickle_protocol)
    pickler.dump(obj)
    data_value = data_buf.getvalue()
    zip_file.write_record("data.pkl", data_value, len(data_value))
    zip_file.write_record(".format_version", "1", len("1"))
    storage_alignment = str(_get_storage_alignment())
    zip_file.write_record(
        ".storage_alignment", storage_alignment, len(storage_alignment)
    )

    # Write byte order marker
    if not _disable_byteorder_record:
        if sys.byteorder not in ["little", "big"]:
            raise ValueError("Unknown endianness type: " + sys.byteorder)
        zip_file.write_record("byteorder", sys.byteorder, len(sys.byteorder))

    # Write each tensor to a file named data/the_storage_key in the zip archive
    for key in sorted(serialized_storages.keys()):
        name = f"data/{key}"
        storage = serialized_storages[key]
        num_bytes = storage.nbytes()
        if _serialization_tls.skip_data:
            zip_file.write_record_metadata(name, num_bytes)
        else:
            # given that we copy things around anyway, we might use storage.cpu()
            # this means to get tensors serialized, you need to implement
            # .cpu() on the underlying Storage
            if storage.device.type != "cpu":
                from torch.utils.serialization import config

                if (
                    config.save.use_pinned_memory_for_d2h
                    and (
                        acc := torch.accelerator.current_accelerator(
                            check_available=True
                        )
                    )
                    is not None
                    and acc.type == storage.device.type
                ):
                    new_storage = torch.empty(
                        num_bytes, dtype=torch.uint8, device="cpu", pin_memory=True
                    ).untyped_storage()
                    new_storage.copy_(storage)
                    torch.accelerator.current_stream(
                        storage.device.index
                    ).synchronize()
                    storage = new_storage
                else:
                    storage = storage.cpu()
            # Now that it is on the CPU we can directly copy it into the zip file
            zip_file.write_record(name, storage, num_bytes)
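# Record-layout sketch: the archive written above is an ordinary zip. Entry
# names sit under a single archive-level prefix (an implementation detail;
# shown here for a hypothetical "model.pt" holding one storage, plus
# writer-generated records such as "model/version"):
#
#     >>> import zipfile
#     >>> zipfile.ZipFile("model.pt").namelist()  # doctest: +SKIP
#     ['model/data.pkl', 'model/.format_version', 'model/.storage_alignment',
#      'model/byteorder', 'model/data/0', ...]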
def load(
    f: FileLike,
    map_location: MAP_LOCATION = None,
    pickle_module: Any = None,
    *,
    weights_only: Optional[bool] = None,
    mmap: Optional[bool] = None,
    **pickle_load_args: Any,
) -> Any:
    """load(f, map_location=None, pickle_module=pickle, *, weights_only=True, mmap=None, **pickle_load_args)

    Loads an object saved with :func:`torch.save` from a file.

    :func:`torch.load` uses Python's unpickling facilities but treats storages,
    which underlie tensors, specially. They are first deserialized on the CPU
    and are then moved to the device they were saved from. If this fails
    (e.g. because the run time system doesn't have certain devices), an
    exception is raised. However, storages can be dynamically remapped to an
    alternative set of devices using the :attr:`map_location` argument.

    .. warning::
        :func:`torch.load` with ``weights_only=False`` uses ``pickle`` module
        implicitly, which is known to be insecure. Only load data you trust.

    Args:
        f: a file-like object (has to implement :meth:`read`, :meth:`readline`,
            :meth:`tell`, and :meth:`seek`), or a string or os.PathLike object
            containing a file name
        map_location: a function, :class:`torch.device`, string or a dict
            specifying how to remap storage locations
        pickle_module: module used for unpickling metadata and objects
        weights_only: Indicates whether unpickler should be restricted to
            loading only tensors, primitive types, dictionaries and any types
            added via :func:`torch.serialization.add_safe_globals`
        mmap: Indicates whether the file should be mmaped rather than loading
            all the storages into memory. Typically, tensor storages in the
            file will first be moved from disk to CPU memory, after which they
            are moved to the location that they were tagged with when saving.
            With this flag, tensor storages remain backed by the mmaped file.
        pickle_load_args: (Python 3 only) optional keyword arguments passed
            over to :func:`pickle_module.load` and
            :func:`pickle_module.Unpickler`, e.g., :attr:`errors=...`

    Example:
        >>> # xdoctest: +SKIP("undefined filepaths")
        >>> torch.load("tensors.pt", weights_only=True)

        # Load all tensors onto the CPU
        >>> torch.load(
        ...     "tensors.pt",
        ...     map_location=torch.device("cpu"),
        ...     weights_only=True,
        ... )

        # Load all tensors onto the CPU, using a function
        >>> torch.load(
        ...     "tensors.pt",
        ...     map_location=lambda storage, loc: storage,
        ...     weights_only=True,
        ... )

        # Load all tensors onto GPU 1
        >>> torch.load(
        ...     "tensors.pt",
        ...     map_location=lambda storage, loc: storage.cuda(1),
        ...     weights_only=True,
        ... )  # type: ignore[attr-defined]

        # Map tensors from GPU 1 to GPU 0
        >>> torch.load(
        ...     "tensors.pt",
        ...     map_location={"cuda:1": "cuda:0"},
        ...     weights_only=True,
        ... )

        # Load tensor from io.BytesIO object
        # Loading from a buffer setting weights_only=False, warning this can be unsafe
        >>> with open("tensor.pt", "rb") as f:
        ...     buffer = io.BytesIO(f.read())
        >>> torch.load(buffer, weights_only=False)

        # Load a module with 'ascii' encoding for unpickling
        # Loading from a module setting weights_only=False, warning this can be unsafe
        >>> torch.load("module.pt", encoding="ascii", weights_only=False)
    """
    torch._C._log_api_usage_once("torch.load")

    DOCS_MESSAGE = (
        "\n\nCheck the documentation of torch.load to learn more about types accepted "
        "by default with weights_only "
        "https://pytorch.org/docs/stable/generated/torch.load.html."
    )

    def _get_wo_message(message: str) -> str:
        unsafe_global_pattern = r"GLOBAL (\S+) was not an allowed global by default."
        has_unsafe_global = re.search(unsafe_global_pattern, message) is not None
        blocklist_pattern = r"whose module (\S+) is blocked"
        has_blocklist = re.search(blocklist_pattern, message) is not None
        import_pattern = r"(\S+) must be (\S+) to load"
        has_import = re.search(import_pattern, message) is not None
        if has_unsafe_global:
            updated_message = (
                "Weights only load failed. This file can still be loaded, to do so "
                "you have two options, do those steps only if you trust the source "
                "of the checkpoint. "
                f"\n\t(1) {UNSAFE_MESSAGE}\n\t(2) Alternatively, to load with "
                "`weights_only=True` please check the recommended steps in the "
                "following error message.\n\tWeightsUnpickler error: " + message
            )
        else:
            if has_import:
                return f"Weights only load failed. {message}\n {UNSAFE_MESSAGE}\n"
            else:
                updated_message = f"Weights only load failed. {UNSAFE_MESSAGE}\n"
                if not has_blocklist:
                    updated_message += (
                        "Please file an issue with the following so that we can make "
                        "`weights_only=True` compatible with your use case: "
                        "WeightsUnpickler error: "
                    )
                updated_message += message
        return updated_message + DOCS_MESSAGE

    weights_only_not_set = weights_only is None

    if weights_only_not_set:
        weights_only = _default_to_weights_only(pickle_module)

    true_values = ["1", "y", "yes", "true"]
    # Add ability to force safe only or non-safe weight loads via environment variables
    force_weights_only_load = (
        os.getenv("TORCH_FORCE_WEIGHTS_ONLY_LOAD", "0") in true_values
    )
    force_no_weights_only_load = (
        os.getenv("TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD", "0") in true_values
    )

    if force_weights_only_load and force_no_weights_only_load:
        raise RuntimeError(
            "Only one of `TORCH_FORCE_WEIGHTS_ONLY_LOAD` or "
            "`TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD` should be set, but both were set."
        )
    elif force_weights_only_load:
        weights_only = True
    elif force_no_weights_only_load:
        # TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD can only override if callsite did not
        # explicitly set weights_only
        if weights_only_not_set:
            warnings.warn(
                "Environment variable TORCH_FORCE_NO_WEIGHTS_ONLY_LOAD detected, since the"
                "`weights_only` argument was not explicitly passed to `torch.load`, forcing weights_only=False.",
                UserWarning,
                stacklevel=2,
            )
            weights_only = False

    if weights_only:
        if pickle_module is not None:
            raise RuntimeError(
                "Can not safely load weights when explicit pickle_module is specified"
            )
    else:
        if pickle_module is None:
            pickle_module = pickle

    # make flipping default BC-compatible
    if mmap is None:
        mmap = False

    _check_dill_version(pickle_module)

    if "encoding" not in pickle_load_args.keys():
        pickle_load_args["encoding"] = "utf-8"

    with _open_file_like(f, "rb") as opened_file:
        if _is_zipfile(opened_file):
            # The zipfile reader is going to advance the current file position.
            # If we want to actually tail call to torch.jit.load, we need to
            # reset back to the original position.
            orig_position = opened_file.tell()
            overall_storage = None
            with _open_zipfile_reader(opened_file) as opened_zipfile:
                if _is_torchscript_zip(opened_zipfile):
                    warnings.warn(
                        "'torch.load' received a zip file that looks like a TorchScript archive"
                        " dispatching to 'torch.jit.load' (call 'torch.jit.load' directly to"
                        " silence this warning)",
                        UserWarning,
                    )
                    if weights_only:
                        raise RuntimeError(
                            "Cannot use ``weights_only=True`` with TorchScript archives passed to "
                            "``torch.load``. " + UNSAFE_MESSAGE
                        )
                    opened_file.seek(orig_position)
                    return torch.jit.load(opened_file, map_location=map_location)
                if mmap:
                    if not _is_path(f):
                        raise ValueError(
                            "f must be a file path in order to use the mmap argument"
                        )
                    size = os.path.getsize(f)
                    if not IS_WINDOWS:
                        shared = get_default_mmap_options() == MAP_SHARED
                    else:
                        shared = False
                    overall_storage = torch.UntypedStorage.from_file(
                        os.fspath(f), shared, size
                    )
                if weights_only:
                    try:
                        return _load(
                            opened_zipfile,
                            map_location,
                            _weights_only_unpickler,
                            overall_storage=overall_storage,
                            **pickle_load_args,
                        )
                    except pickle.UnpicklingError as e:
                        raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
                return _load(
                    opened_zipfile,
                    map_location,
                    pickle_module,
                    overall_storage=overall_storage,
                    **pickle_load_args,
                )
        if mmap:
            f_name = "" if not isinstance(f, str) else f"{f}, "
            raise RuntimeError(
                "mmap can only be used with files saved with "
                f"`torch.save({f_name}_use_new_zipfile_serialization=True), "
                "please torch.save your checkpoint with this option in order to use mmap."
            )
        if weights_only:
            try:
                return _legacy_load(
                    opened_file,
                    map_location,
                    _weights_only_unpickler,
                    **pickle_load_args,
                )
            except pickle.UnpicklingError as e:
                raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
        return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)


# Register pickling support for layout instances such as torch.sparse_coo, etc.
def _get_layout(name: str) -> torch.layout:
    """Get layout extension object from its string representation."""
    cache = _get_layout.cache  # type: ignore[attr-defined]
    if not cache:
        for v in torch.__dict__.values():
            if isinstance(v, torch.layout):
                cache[str(v)] = v
    return cache[name]


# There is not yet a good way to type-annotate function attributes:
# https://github.com/python/mypy/issues/2087
_get_layout.cache = {}  # type: ignore[attr-defined]
copyreg.pickle(torch.layout, lambda obj: (_get_layout, (str(obj),)))


def _maybe_decode_ascii(bytes_str: Union[bytes, str]) -> str:
    # When using encoding='bytes' in Py3, some **internal** keys stored as
    # strings in Py2 are loaded as bytes. This function decodes them with
    # ascii encoding, the one that Py3 uses by default.
    if isinstance(bytes_str, bytes):
        return bytes_str.decode("ascii")
    return bytes_str


def _get_restore_location(map_location):
    if map_location is None:
        restore_location = default_restore_location
    elif isinstance(map_location, dict):

        def restore_location(storage, location):
            location = map_location.get(location, location)
            return default_restore_location(storage, location)

    elif isinstance(map_location, (str, bytes)):

        def restore_location(storage, location):
            return default_restore_location(storage, map_location)

    elif isinstance(map_location, torch.device):

        def restore_location(storage, location):
            return default_restore_location(storage, str(map_location))

    else:

        def restore_location(storage, location):
            result = map_location(storage, location)
            if result is None:
                result = default_restore_location(storage, location)
            return result

    return restore_location


class StorageType:
    def __init__(self, name):
        self._dtype = _get_dtype_from_pickle_storage_type(name)

    @property
    def dtype(self):
        return self._dtype

    def __str__(self):
        return f"StorageType(dtype={self.dtype})"


def _legacy_load(f, map_location, pickle_module, **pickle_load_args):
    deserialized_objects: dict[int, Any] = {}

    restore_location = _get_restore_location(map_location)

    class UnpicklerWrapper(pickle_module.Unpickler):  # type: ignore[name-defined]
        def find_class(self, mod_name, name):
            if type(name) is str and "Storage" in name:
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            return super().find_class(mod_name, name)

    def _check_container_source(container_type, source_file, original_source):
        try:
            current_source = "".join(get_source_lines_and_file(container_type)[0])
        except Exception:
            # Saving the source is optional, so we can ignore any errors.
            warnings.warn(
                "Couldn't retrieve source code for container of "
                "type " + container_type.__name__ + ". It won't be checked "
                "for correctness upon loading."
            )
            return
        if original_source != current_source:
            if container_type.dump_patches:
                file_name = container_type.__name__ + ".patch"
                diff = difflib.unified_diff(
                    current_source.split("\n"),
                    original_source.split("\n"),
                    source_file,
                    source_file,
                    lineterm="",
                )
                lines = "\n".join(diff)
                try:
                    with open(file_name, "a+") as f:
                        file_size = f.seek(0, 2)
                        f.seek(0)
                        if file_size == 0:
                            f.write(lines)
                        elif file_size != len(lines) or f.read() != lines:
                            raise OSError
                    msg = (
                        "Saved a reverse patch to " + file_name + ". "
                        "Run `patch -p0 < " + file_name + "` to revert your changes."
                    )
                except OSError:
                    msg = (
                        "Tried to save a patch, but couldn't create a "
                        "writable file " + file_name + ". Make sure it "
                        "doesn't exist and your working directory is "
                        "writable."
                    )
            else:
                msg = (
                    "you can retrieve the original source code by "
                    "accessing the object's source attribute or set "
                    "`torch.nn.Module.dump_patches = True` and use the "
                    "patch tool to revert the changes."
                )
            msg = (
                f"source code of class '{torch.typename(container_type)}' "
                f"has changed. {msg}"
            )
            warnings.warn(msg, SourceChangeWarning)

    def legacy_load(f):
        deserialized_objects: dict[int, Any] = {}

        def persistent_load(saved_id):
            if isinstance(saved_id, tuple):
                # Ignore containers that don't have any sources saved
                if all(saved_id[1:]):
                    _check_container_source(*saved_id)
                return saved_id[0]
            return deserialized_objects[int(saved_id)]

        with (
            closing(tarfile.open(fileobj=f, mode="r:", format=tarfile.PAX_FORMAT)) as tar,
            mkdtemp() as tmpdir,
        ):
            if pickle_module is _weights_only_unpickler:
                raise RuntimeError(
                    "Cannot use ``weights_only=True`` with files saved in the "
                    "legacy .tar format. " + UNSAFE_MESSAGE
                )
            tar.extract("storages", path=tmpdir)
            with open(os.path.join(tmpdir, "storages"), "rb", 0) as f:
                num_storages = pickle_module.load(f, **pickle_load_args)
                for _ in range(num_storages):
                    args = pickle_module.load(f, **pickle_load_args)
                    key, location, storage_type = args
                    dtype = storage_type._dtype
                    obj = cast(Storage, torch.UntypedStorage)._new_with_file(
                        f, torch._utils._element_size(dtype)
                    )
                    obj = restore_location(obj, location)
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[key] = torch.storage.TypedStorage(
                        wrap_storage=obj, dtype=dtype, _internal=True
                    )

                storage_views = pickle_module.load(f, **pickle_load_args)
                for target_cdata, root_cdata, offset, numel in storage_views:
                    root = deserialized_objects[root_cdata]
                    element_size = torch._utils._element_size(root.dtype)
                    offset_bytes = offset * element_size
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[target_cdata] = torch.storage.TypedStorage(
                        wrap_storage=root._untyped_storage[
                            offset_bytes : offset_bytes + numel * element_size
                        ],
                        dtype=root.dtype,
                        _internal=True,
                    )

            tar.extract("tensors", path=tmpdir)
            with open(os.path.join(tmpdir, "tensors"), "rb", 0) as f:
                num_tensors = pickle_module.load(f, **pickle_load_args)
                for _ in range(num_tensors):
                    args = pickle_module.load(f, **pickle_load_args)
                    key, storage_id, _original_tensor_type = args
                    storage = deserialized_objects[storage_id]
                    (ndim,) = struct.unpack("<i", f.read(4))
                    # skip next 4 bytes; legacy encoding treated ndim as 8 bytes
                    f.read(4)
                    numel = struct.unpack(f"<{ndim}q", f.read(8 * ndim))
                    stride = struct.unpack(f"<{ndim}q", f.read(8 * ndim))
                    (storage_offset,) = struct.unpack("<q", f.read(8))
                    tensor = torch.empty((0,), dtype=storage.dtype).set_(
                        storage._untyped_storage, storage_offset, numel, stride
                    )
                    deserialized_objects[key] = tensor

            pickle_file = tar.extractfile("pickle")
            unpickler = UnpicklerWrapper(pickle_file, **pickle_load_args)
            unpickler.persistent_load = persistent_load
            result = unpickler.load()
            return result

    deserialized_objects = {}

    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        if typename == "module":
            # Ignore containers that don't have any sources saved
            if all(data[1:]):
                _check_container_source(*data)
            return data[0]
        elif typename == "storage":
            storage_type, root_key, location, numel, view_metadata = data
            location = _maybe_decode_ascii(location)
            dtype = storage_type.dtype

            nbytes = numel * torch._utils._element_size(dtype)

            if root_key not in deserialized_objects:
                if torch._guards.active_fake_mode() is not None:
                    obj = cast(Storage, torch.UntypedStorage(nbytes, device="meta"))
                else:
                    obj = cast(Storage, torch.UntypedStorage(nbytes))
                    obj._torch_load_uninitialized = True
                    obj = restore_location(obj, location)
                # TODO: Once we decide to break serialization FC, we can
                # stop wrapping with TypedStorage
                typed_storage = torch.storage.TypedStorage(
                    wrap_storage=obj, dtype=dtype, _internal=True
                )
                deserialized_objects[root_key] = typed_storage
            else:
                typed_storage = deserialized_objects[root_key]
                if typed_storage._data_ptr() == 0:
                    typed_storage = torch.storage.TypedStorage(
                        device=typed_storage._untyped_storage.device,
                        dtype=dtype,
                        _internal=True,
                    )

            if view_metadata is not None:
                view_key, offset, view_size = view_metadata
                offset_bytes = offset * torch._utils._element_size(dtype)
                view_size_bytes = view_size * torch._utils._element_size(dtype)
                if view_key not in deserialized_objects:
                    # TODO: Once we decide to break serialization FC, we can
                    # stop wrapping with TypedStorage
                    deserialized_objects[view_key] = torch.storage.TypedStorage(
                        wrap_storage=typed_storage._untyped_storage[
                            offset_bytes : offset_bytes + view_size_bytes
                        ],
                        dtype=dtype,
                        _internal=True,
                    )
                res = deserialized_objects[view_key]
            else:
                res = typed_storage
            return res
        else:
            raise RuntimeError(f"Unknown saved id type: {saved_id[0]}")

    _check_seekable(f)
    f_should_read_directly = _should_read_directly(f)

    if f_should_read_directly and f.tell() == 0:
        # legacy_load requires that f has fileno()
        # only if offset is zero we can attempt the legacy tar file loader
        try:
            return legacy_load(f)
        except tarfile.TarError:
            if _is_zipfile(f):
                # .zip is used for torch.jit.save and will throw an un-pickling
                # error here
                raise RuntimeError(
                    f"{f.name} is a zip archive (did you mean to use torch.jit.load()?)"
                ) from None
            # if not a tarfile, reset file offset and proceed
            f.seek(0)

    magic_number = pickle_module.load(f, **pickle_load_args)
    if magic_number != MAGIC_NUMBER:
        raise RuntimeError("Invalid magic number; corrupt file?")
    protocol_version = pickle_module.load(f, **pickle_load_args)
    if protocol_version != PROTOCOL_VERSION:
        raise RuntimeError(f"Invalid protocol version: {protocol_version}")

    _sys_info = pickle_module.load(f, **pickle_load_args)
    unpickler = UnpicklerWrapper(f, **pickle_load_args)
    unpickler.persistent_load = persistent_load
    result = unpickler.load()

    deserialized_storage_keys = pickle_module.load(f, **pickle_load_args)

    if torch._guards.active_fake_mode() is None and not _serialization_tls.skip_data:
        offset = f.tell() if f_should_read_directly else None
        for key in deserialized_storage_keys:
            assert key in deserialized_objects
            typed_storage = deserialized_objects[key]
            typed_storage._untyped_storage._set_from_file(
                f,
                offset,
                f_should_read_directly,
                torch._utils._element_size(typed_storage.dtype),
            )
            if offset is not None:
                offset = f.tell()

    torch._utils._validate_loaded_sparse_tensors()

    return result


def _load(
    zip_file,
    map_location,
    pickle_module,
    pickle_file="data.pkl",
    overall_storage=None,
    **pickle_load_args,
):
    restore_location = _get_restore_location(map_location)

    loaded_storages = {}

    # check if byteswapping is needed
    byteordername = "byteorder"
    byteorderdata = None
    if zip_file.has_record(byteordername):
        byteorderdata = zip_file.get_record(byteordername)
        if byteorderdata not in [b"little", b"big"]:
            raise ValueError("Unknown endianness type: " + byteorderdata.decode())
    elif (
        get_default_load_endianness() == LoadEndianness.LITTLE
        or get_default_load_endianness() is None
    ):
        byteorderdata = b"little"
    elif get_default_load_endianness() == LoadEndianness.BIG:
        byteorderdata = b"big"
    elif get_default_load_endianness() == LoadEndianness.NATIVE:
        pass
    else:
        raise ValueError("Invalid load endianness type")

    if (
        not zip_file.has_record(byteordername)
        and get_default_load_endianness() is None
        and sys.byteorder == "big"
    ):
        # Default behaviour was changed
        # See https://github.com/pytorch/pytorch/issues/101688
        warnings.warn(
            "The default load endianness for checkpoints without a byteorder mark "
            "on big endian machines was changed from 'native' to 'little' endian, "
            "to avoid this behavior please use "
            "torch.serialization.set_default_load_endianness to set "
            "the desired default load endianness",
            UserWarning,
        )

    def load_tensor(dtype, nbytes, key, location):
        name = f"data/{key}"
        if torch._guards.detect_fake_mode(None) is not None:
            storage = torch.UntypedStorage(nbytes, device="meta")
            storage._checkpoint_offset = zip_file.get_record_offset(name)
        elif overall_storage is not None:
            storage_offset = zip_file.get_record_offset(name)
            storage = overall_storage[storage_offset : storage_offset + nbytes]
        else:
            storage = (
                zip_file.get_storage_from_record(name, nbytes, torch.UntypedStorage)
                ._typed_storage()
                ._untyped_storage
            )
        # swap here if byteswapping is needed
        if byteorderdata is not None:
            if byteorderdata.decode() != sys.byteorder:
                storage.byteswap(dtype)

        # TODO: Once we decide to break serialization FC, we can
        # stop wrapping with TypedStorage
        typed_storage = torch.storage.TypedStorage(
            wrap_storage=restore_location(storage, location),
            dtype=dtype,
            _internal=True,
        )

        if typed_storage._data_ptr() != 0:
            loaded_storages[key] = typed_storage

        return typed_storage

    def persistent_load(saved_id):
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        assert typename == "storage", (
            f"Unknown typename for persistent_load, expected 'storage' but got '{typename}'"
        )
        storage_type, key, location, numel = data
        if storage_type is torch.UntypedStorage:
            dtype = torch.uint8
        else:
            dtype = storage_type.dtype

        if key in loaded_storages:
            typed_storage = loaded_storages[key]
        else:
            nbytes = numel * torch._utils._element_size(dtype)
            typed_storage = load_tensor(
                dtype, nbytes, key, _maybe_decode_ascii(location)
            )

        return typed_storage

    load_module_mapping: dict[str, str] = {
        # See https://github.com/pytorch/pytorch/pull/51633
        "torch.tensor": "torch._tensor"
    }

    # Need to subclass Unpickler instead of directly monkey-patching the
    # find_class method because it's marked readonly in pickle.
    class UnpicklerWrapper(pickle_module.Unpickler):  # type: ignore[name-defined]
        # from https://stackoverflow.com/questions/13398462/unpickling-python-objects-with-a-changed-module-path/13405732
        # Lets us override the imports that pickle uses when unpickling an object.
        # Useful for maintaining BC if a module path a tensor relies on was moved.
        def find_class(self, mod_name, name):
            if type(name) is str and "Storage" in name:
                try:
                    return StorageType(name)
                except KeyError:
                    pass
            mod_name = load_module_mapping.get(mod_name, mod_name)
            return super().find_class(mod_name, name)

    # Load the data (which may in turn use `persistent_load` to load tensors)
    data_file = io.BytesIO(zip_file.get_record(pickle_file))

    unpickler = UnpicklerWrapper(data_file, **pickle_load_args)
    unpickler.persistent_load = persistent_load
    # Needed for tensors where storage device and rebuilt tensor device are
    # not connected (wrapper subclasses and tensors rebuilt using numpy)
    _serialization_tls.map_location = map_location
    result = unpickler.load()
    _serialization_tls.map_location = None

    torch._utils._validate_loaded_sparse_tensors()
    torch._C._log_api_usage_metadata(
        "torch.load.metadata", {"serialization_id": zip_file.serialization_id()}
    )
    return result


def _is_torchscript_zip(zip_file):
    return "constants.pkl" in zip_file.get_all_records()